Diffstat (limited to 'deps/v8/src')
-rw-r--r--deps/v8/src/accessors.cc39
-rw-r--r--deps/v8/src/allocation.cc187
-rw-r--r--deps/v8/src/allocation.h67
-rw-r--r--deps/v8/src/api-arguments-inl.h292
-rw-r--r--deps/v8/src/api-arguments.cc32
-rw-r--r--deps/v8/src/api-arguments.h81
-rw-r--r--deps/v8/src/api-natives.cc10
-rw-r--r--deps/v8/src/api.cc979
-rw-r--r--deps/v8/src/api.h24
-rw-r--r--deps/v8/src/arguments.h2
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h30
-rw-r--r--deps/v8/src/arm/assembler-arm.cc130
-rw-r--r--deps/v8/src/arm/assembler-arm.h12
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc36
-rw-r--r--deps/v8/src/arm/codegen-arm.cc18
-rw-r--r--deps/v8/src/arm/constants-arm.cc2
-rw-r--r--deps/v8/src/arm/constants-arm.h3
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc8
-rw-r--r--deps/v8/src/arm/disasm-arm.cc44
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.cc7
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc96
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h10
-rw-r--r--deps/v8/src/arm/simulator-arm.cc238
-rw-r--r--deps/v8/src/arm/simulator-arm.h124
-rw-r--r--deps/v8/src/arm64/assembler-arm64-inl.h30
-rw-r--r--deps/v8/src/arm64/assembler-arm64.cc56
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h15
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc152
-rw-r--r--deps/v8/src/arm64/constants-arm64.h1
-rw-r--r--deps/v8/src/arm64/cpu-arm64.cc2
-rw-r--r--deps/v8/src/arm64/deoptimizer-arm64.cc14
-rw-r--r--deps/v8/src/arm64/disasm-arm64.cc51
-rw-r--r--deps/v8/src/arm64/eh-frame-arm64.cc7
-rw-r--r--deps/v8/src/arm64/frame-constants-arm64.h25
-rw-r--r--deps/v8/src/arm64/instructions-arm64-constants.cc26
-rw-r--r--deps/v8/src/arm64/instrument-arm64.cc4
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.cc7
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64-inl.h54
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc329
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h82
-rw-r--r--deps/v8/src/arm64/simulator-arm64.cc227
-rw-r--r--deps/v8/src/arm64/simulator-arm64.h156
-rw-r--r--deps/v8/src/arm64/simulator-logic-arm64.cc4
-rw-r--r--deps/v8/src/arm64/utils-arm64.cc4
-rw-r--r--deps/v8/src/asmjs/asm-parser.cc36
-rw-r--r--deps/v8/src/asmjs/asm-scanner.cc2
-rw-r--r--deps/v8/src/assembler.cc36
-rw-r--r--deps/v8/src/assembler.h28
-rw-r--r--deps/v8/src/ast/ast-numbering.cc26
-rw-r--r--deps/v8/src/ast/ast-traversal-visitor.h9
-rw-r--r--deps/v8/src/ast/ast.cc21
-rw-r--r--deps/v8/src/ast/ast.h71
-rw-r--r--deps/v8/src/ast/prettyprinter.cc43
-rw-r--r--deps/v8/src/ast/prettyprinter.h1
-rw-r--r--deps/v8/src/ast/scopes.cc26
-rw-r--r--deps/v8/src/ast/scopes.h14
-rw-r--r--deps/v8/src/bailout-reason.cc21
-rw-r--r--deps/v8/src/bailout-reason.h123
-rw-r--r--deps/v8/src/base/DEPS1
-rw-r--r--deps/v8/src/base/cpu.cc20
-rw-r--r--deps/v8/src/base/debug/stack_trace_posix.cc8
-rw-r--r--deps/v8/src/base/functional.cc8
-rw-r--r--deps/v8/src/base/ieee754.cc282
-rw-r--r--deps/v8/src/base/lazy-instance.h12
-rw-r--r--deps/v8/src/base/logging.cc2
-rw-r--r--deps/v8/src/base/logging.h18
-rw-r--r--deps/v8/src/base/macros.h57
-rw-r--r--deps/v8/src/base/once.cc4
-rw-r--r--deps/v8/src/base/once.h11
-rw-r--r--deps/v8/src/base/page-allocator.cc64
-rw-r--r--deps/v8/src/base/page-allocator.h41
-rw-r--r--deps/v8/src/base/platform/platform-fuchsia.cc5
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc58
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc33
-rw-r--r--deps/v8/src/base/platform/platform.h68
-rw-r--r--deps/v8/src/base/platform/semaphore.cc2
-rw-r--r--deps/v8/src/base/platform/time.cc3
-rw-r--r--deps/v8/src/base/safe_conversions.h3
-rw-r--r--deps/v8/src/base/utils/random-number-generator.cc4
-rw-r--r--deps/v8/src/base/utils/random-number-generator.h4
-rw-r--r--deps/v8/src/bignum.cc2
-rw-r--r--deps/v8/src/bootstrapper.cc182
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc91
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc587
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.cc539
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.cc1
-rw-r--r--deps/v8/src/builtins/builtins-collections-gen.cc5
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.cc9
-rw-r--r--deps/v8/src/builtins/builtins-conversion-gen.cc15
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h85
-rw-r--r--deps/v8/src/builtins/builtins-function-gen.cc1
-rw-r--r--deps/v8/src/builtins/builtins-handler-gen.cc54
-rw-r--r--deps/v8/src/builtins/builtins-ic-gen.cc4
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc449
-rw-r--r--deps/v8/src/builtins/builtins-intl-gen.cc1
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.cc53
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.h13
-rw-r--r--deps/v8/src/builtins/builtins-math-gen.cc3
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.cc321
-rw-r--r--deps/v8/src/builtins/builtins-object.cc25
-rw-r--r--deps/v8/src/builtins/builtins-promise-gen.cc217
-rw-r--r--deps/v8/src/builtins/builtins-promise-gen.h3
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.cc100
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.h9
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.cc59
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc10
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.cc236
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.h17
-rw-r--r--deps/v8/src/builtins/builtins-string.cc4
-rw-r--r--deps/v8/src/builtins/builtins-typedarray-gen.cc207
-rw-r--r--deps/v8/src/builtins/builtins-utils-gen.h3
-rw-r--r--deps/v8/src/builtins/builtins-utils.h3
-rw-r--r--deps/v8/src/builtins/builtins.cc32
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc87
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc115
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc115
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc107
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc109
-rw-r--r--deps/v8/src/builtins/setup-builtins-internal.cc11
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc112
-rw-r--r--deps/v8/src/cached-powers.cc174
-rw-r--r--deps/v8/src/code-factory.cc26
-rw-r--r--deps/v8/src/code-factory.h4
-rw-r--r--deps/v8/src/code-stub-assembler.cc1250
-rw-r--r--deps/v8/src/code-stub-assembler.h218
-rw-r--r--deps/v8/src/code-stubs.cc43
-rw-r--r--deps/v8/src/code-stubs.h158
-rw-r--r--deps/v8/src/compilation-info.cc2
-rw-r--r--deps/v8/src/compilation-info.h6
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc2
-rw-r--r--deps/v8/src/compiler.cc141
-rw-r--r--deps/v8/src/compiler.h8
-rw-r--r--deps/v8/src/compiler/OWNERS5
-rw-r--r--deps/v8/src/compiler/access-builder.cc8
-rw-r--r--deps/v8/src/compiler/access-builder.h3
-rw-r--r--deps/v8/src/compiler/arm/code-generator-arm.cc325
-rw-r--r--deps/v8/src/compiler/arm/instruction-codes-arm.h1
-rw-r--r--deps/v8/src/compiler/arm/instruction-scheduler-arm.cc1
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm.cc181
-rw-r--r--deps/v8/src/compiler/arm64/code-generator-arm64.cc425
-rw-r--r--deps/v8/src/compiler/arm64/instruction-codes-arm64.h9
-rw-r--r--deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc28
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64.cc263
-rw-r--r--deps/v8/src/compiler/branch-elimination.cc5
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc47
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h8
-rw-r--r--deps/v8/src/compiler/c-linkage.cc2
-rw-r--r--deps/v8/src/compiler/code-assembler.cc17
-rw-r--r--deps/v8/src/compiler/code-assembler.h14
-rw-r--r--deps/v8/src/compiler/code-generator.cc23
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc12
-rw-r--r--deps/v8/src/compiler/common-operator.cc97
-rw-r--r--deps/v8/src/compiler/common-operator.h22
-rw-r--r--deps/v8/src/compiler/dead-code-elimination.cc49
-rw-r--r--deps/v8/src/compiler/dead-code-elimination.h30
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc510
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.h12
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.cc45
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.h2
-rw-r--r--deps/v8/src/compiler/escape-analysis.cc19
-rw-r--r--deps/v8/src/compiler/frame.cc11
-rw-r--r--deps/v8/src/compiler/frame.h49
-rw-r--r--deps/v8/src/compiler/gap-resolver.cc14
-rw-r--r--deps/v8/src/compiler/graph-assembler.cc36
-rw-r--r--deps/v8/src/compiler/graph-assembler.h16
-rw-r--r--deps/v8/src/compiler/graph-trimmer.h1
-rw-r--r--deps/v8/src/compiler/ia32/code-generator-ia32.cc761
-rw-r--r--deps/v8/src/compiler/ia32/instruction-codes-ia32.h43
-rw-r--r--deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc53
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32.cc316
-rw-r--r--deps/v8/src/compiler/instruction-codes.h14
-rw-r--r--deps/v8/src/compiler/instruction-scheduler.cc30
-rw-r--r--deps/v8/src/compiler/instruction-selector-impl.h29
-rw-r--r--deps/v8/src/compiler/instruction-selector.cc348
-rw-r--r--deps/v8/src/compiler/instruction-selector.h31
-rw-r--r--deps/v8/src/compiler/instruction.cc4
-rw-r--r--deps/v8/src/compiler/instruction.h14
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc15
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.cc742
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.h11
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc2109
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h29
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc16
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc29
-rw-r--r--deps/v8/src/compiler/js-graph.cc4
-rw-r--r--deps/v8/src/compiler/js-graph.h4
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc3
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.h2
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc3
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc103
-rw-r--r--deps/v8/src/compiler/js-operator.cc43
-rw-r--r--deps/v8/src/compiler/js-operator.h48
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.cc5
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc121
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h5
-rw-r--r--deps/v8/src/compiler/linkage.cc41
-rw-r--r--deps/v8/src/compiler/linkage.h27
-rw-r--r--deps/v8/src/compiler/load-elimination.cc4
-rw-r--r--deps/v8/src/compiler/loop-analysis.cc2
-rw-r--r--deps/v8/src/compiler/loop-peeling.cc95
-rw-r--r--deps/v8/src/compiler/loop-peeling.h31
-rw-r--r--deps/v8/src/compiler/loop-variable-optimizer.cc34
-rw-r--r--deps/v8/src/compiler/loop-variable-optimizer.h7
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.cc43
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc24
-rw-r--r--deps/v8/src/compiler/machine-operator.cc84
-rw-r--r--deps/v8/src/compiler/machine-operator.h22
-rw-r--r--deps/v8/src/compiler/memory-optimizer.cc2
-rw-r--r--deps/v8/src/compiler/mips/code-generator-mips.cc228
-rw-r--r--deps/v8/src/compiler/mips/instruction-codes-mips.h1
-rw-r--r--deps/v8/src/compiler/mips/instruction-selector-mips.cc169
-rw-r--r--deps/v8/src/compiler/mips64/code-generator-mips64.cc245
-rw-r--r--deps/v8/src/compiler/mips64/instruction-codes-mips64.h1
-rw-r--r--deps/v8/src/compiler/mips64/instruction-selector-mips64.cc190
-rw-r--r--deps/v8/src/compiler/node-properties.cc38
-rw-r--r--deps/v8/src/compiler/node-properties.h3
-rw-r--r--deps/v8/src/compiler/opcodes.h46
-rw-r--r--deps/v8/src/compiler/operation-typer.cc10
-rw-r--r--deps/v8/src/compiler/operation-typer.h2
-rw-r--r--deps/v8/src/compiler/pipeline.cc96
-rw-r--r--deps/v8/src/compiler/pipeline.h2
-rw-r--r--deps/v8/src/compiler/ppc/code-generator-ppc.cc302
-rw-r--r--deps/v8/src/compiler/ppc/instruction-selector-ppc.cc307
-rw-r--r--deps/v8/src/compiler/property-access-builder.cc15
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc32
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h7
-rw-r--r--deps/v8/src/compiler/redundancy-elimination.cc55
-rw-r--r--deps/v8/src/compiler/representation-change.cc67
-rw-r--r--deps/v8/src/compiler/representation-change.h23
-rw-r--r--deps/v8/src/compiler/s390/code-generator-s390.cc212
-rw-r--r--deps/v8/src/compiler/s390/instruction-selector-s390.cc321
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.cc8
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc135
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h2
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc315
-rw-r--r--deps/v8/src/compiler/simplified-operator.h178
-rw-r--r--deps/v8/src/compiler/state-values-utils.cc2
-rw-r--r--deps/v8/src/compiler/store-store-elimination.cc4
-rw-r--r--deps/v8/src/compiler/type-cache.h2
-rw-r--r--deps/v8/src/compiler/typer.cc140
-rw-r--r--deps/v8/src/compiler/types.cc2
-rw-r--r--deps/v8/src/compiler/types.h3
-rw-r--r--deps/v8/src/compiler/verifier.cc59
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc693
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h121
-rw-r--r--deps/v8/src/compiler/wasm-linkage.cc123
-rw-r--r--deps/v8/src/compiler/x64/code-generator-x64.cc479
-rw-r--r--deps/v8/src/compiler/x64/instruction-codes-x64.h16
-rw-r--r--deps/v8/src/compiler/x64/instruction-scheduler-x64.cc32
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64.cc217
-rw-r--r--deps/v8/src/contexts-inl.h6
-rw-r--r--deps/v8/src/contexts.cc4
-rw-r--r--deps/v8/src/contexts.h19
-rw-r--r--deps/v8/src/conversions.cc7
-rw-r--r--deps/v8/src/counters-inl.h6
-rw-r--r--deps/v8/src/counters.cc99
-rw-r--r--deps/v8/src/counters.h160
-rw-r--r--deps/v8/src/d8-posix.cc14
-rw-r--r--deps/v8/src/d8.cc441
-rw-r--r--deps/v8/src/d8.h51
-rw-r--r--deps/v8/src/debug/debug-coverage.cc33
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc38
-rw-r--r--deps/v8/src/debug/debug-frames.h8
-rw-r--r--deps/v8/src/debug/debug-type-profile.cc5
-rw-r--r--deps/v8/src/debug/debug.cc8
-rw-r--r--deps/v8/src/debug/liveedit.cc7
-rw-r--r--deps/v8/src/debug/mirrors.js2
-rw-r--r--deps/v8/src/deoptimize-reason.h40
-rw-r--r--deps/v8/src/deoptimizer.cc1194
-rw-r--r--deps/v8/src/deoptimizer.h110
-rw-r--r--deps/v8/src/disassembler.cc42
-rw-r--r--deps/v8/src/disassembler.h2
-rw-r--r--deps/v8/src/eh-frame.cc14
-rw-r--r--deps/v8/src/elements-kind.h37
-rw-r--r--deps/v8/src/elements.cc122
-rw-r--r--deps/v8/src/elements.h19
-rw-r--r--deps/v8/src/execution.cc65
-rw-r--r--deps/v8/src/execution.h13
-rw-r--r--deps/v8/src/extensions/statistics-extension.cc18
-rw-r--r--deps/v8/src/external-reference-table.cc8
-rw-r--r--deps/v8/src/factory-inl.h8
-rw-r--r--deps/v8/src/factory.cc71
-rw-r--r--deps/v8/src/factory.h20
-rw-r--r--deps/v8/src/fast-dtoa.cc7
-rw-r--r--deps/v8/src/feedback-vector-inl.h9
-rw-r--r--deps/v8/src/feedback-vector.cc151
-rw-r--r--deps/v8/src/feedback-vector.h119
-rw-r--r--deps/v8/src/flag-definitions.h136
-rw-r--r--deps/v8/src/flags.cc78
-rw-r--r--deps/v8/src/frame-constants.h8
-rw-r--r--deps/v8/src/frames-inl.h12
-rw-r--r--deps/v8/src/frames.cc133
-rw-r--r--deps/v8/src/frames.h29
-rw-r--r--deps/v8/src/gdb-jit.cc60
-rw-r--r--deps/v8/src/global-handles.cc2
-rw-r--r--deps/v8/src/globals.h351
-rw-r--r--deps/v8/src/handles.cc2
-rw-r--r--deps/v8/src/heap-symbols.h15
-rw-r--r--deps/v8/src/heap/array-buffer-collector.cc3
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc21
-rw-r--r--deps/v8/src/heap/gc-tracer.cc185
-rw-r--r--deps/v8/src/heap/gc-tracer.h69
-rw-r--r--deps/v8/src/heap/heap-inl.h29
-rw-r--r--deps/v8/src/heap/heap.cc390
-rw-r--r--deps/v8/src/heap/heap.h240
-rw-r--r--deps/v8/src/heap/incremental-marking-job.cc2
-rw-r--r--deps/v8/src/heap/incremental-marking.cc35
-rw-r--r--deps/v8/src/heap/local-allocator.h2
-rw-r--r--deps/v8/src/heap/mark-compact.cc203
-rw-r--r--deps/v8/src/heap/mark-compact.h11
-rw-r--r--deps/v8/src/heap/object-stats.cc307
-rw-r--r--deps/v8/src/heap/object-stats.h67
-rw-r--r--deps/v8/src/heap/objects-visiting.h1
-rw-r--r--deps/v8/src/heap/scavenge-job.cc2
-rw-r--r--deps/v8/src/heap/scavenger-inl.h2
-rw-r--r--deps/v8/src/heap/scavenger.cc14
-rw-r--r--deps/v8/src/heap/scavenger.h2
-rw-r--r--deps/v8/src/heap/setup-heap-internal.cc18
-rw-r--r--deps/v8/src/heap/spaces-inl.h35
-rw-r--r--deps/v8/src/heap/spaces.cc768
-rw-r--r--deps/v8/src/heap/spaces.h297
-rw-r--r--deps/v8/src/heap/store-buffer.cc14
-rw-r--r--deps/v8/src/heap/store-buffer.h8
-rw-r--r--deps/v8/src/heap/stress-marking-observer.cc21
-rw-r--r--deps/v8/src/heap/stress-marking-observer.h26
-rw-r--r--deps/v8/src/heap/stress-scavenge-observer.cc94
-rw-r--r--deps/v8/src/heap/stress-scavenge-observer.h39
-rw-r--r--deps/v8/src/heap/sweeper.cc153
-rw-r--r--deps/v8/src/heap/sweeper.h61
-rw-r--r--deps/v8/src/ia32/assembler-ia32-inl.h19
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc80
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h34
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc30
-rw-r--r--deps/v8/src/ia32/codegen-ia32.cc12
-rw-r--r--deps/v8/src/ia32/disasm-ia32.cc162
-rw-r--r--deps/v8/src/ia32/interface-descriptors-ia32.cc7
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc46
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h10
-rw-r--r--deps/v8/src/ia32/simulator-ia32.h46
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc937
-rw-r--r--deps/v8/src/ic/accessor-assembler.h91
-rw-r--r--deps/v8/src/ic/handler-configuration-inl.h57
-rw-r--r--deps/v8/src/ic/handler-configuration.cc327
-rw-r--r--deps/v8/src/ic/handler-configuration.h109
-rw-r--r--deps/v8/src/ic/ic-inl.h3
-rw-r--r--deps/v8/src/ic/ic.cc209
-rw-r--r--deps/v8/src/ic/ic.h4
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc44
-rw-r--r--deps/v8/src/ic/stub-cache.cc2
-rw-r--r--deps/v8/src/ic/stub-cache.h3
-rw-r--r--deps/v8/src/icu_util.cc4
-rw-r--r--deps/v8/src/inspector/BUILD.gn11
-rw-r--r--deps/v8/src/inspector/OWNERS3
-rw-r--r--deps/v8/src/inspector/injected-script-source.js28
-rw-r--r--deps/v8/src/inspector/injected_script_externs.js6
-rw-r--r--deps/v8/src/inspector/js_protocol.json4167
-rw-r--r--deps/v8/src/inspector/js_protocol.pdl1370
-rw-r--r--deps/v8/src/inspector/string-16.cc32
-rw-r--r--deps/v8/src/inspector/v8-console-message.cc5
-rw-r--r--deps/v8/src/inspector/v8-console-message.h1
-rw-r--r--deps/v8/src/inspector/v8-console.cc22
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc34
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.h1
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc35
-rw-r--r--deps/v8/src/inspector/v8-debugger.h8
-rw-r--r--deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc2
-rw-r--r--deps/v8/src/inspector/v8-injected-script-host.cc50
-rw-r--r--deps/v8/src/inspector/v8-injected-script-host.h2
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.cc5
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.h4
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.cc8
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.h1
-rw-r--r--deps/v8/src/inspector/v8-value-utils.cc1
-rw-r--r--deps/v8/src/interface-descriptors.cc72
-rw-r--r--deps/v8/src/interface-descriptors.h80
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc28
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h11
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc400
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h21
-rw-r--r--deps/v8/src/interpreter/bytecode-register-allocator.h5
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.cc2
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.h2
-rw-r--r--deps/v8/src/interpreter/bytecode-register.h26
-rw-r--r--deps/v8/src/interpreter/bytecodes.h9
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.cc2
-rw-r--r--deps/v8/src/interpreter/handler-table-builder.cc2
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc21
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h6
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc206
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics-generator.cc6
-rw-r--r--deps/v8/src/interpreter/interpreter.cc6
-rw-r--r--deps/v8/src/isolate.cc397
-rw-r--r--deps/v8/src/isolate.h64
-rw-r--r--deps/v8/src/js/array.js58
-rw-r--r--deps/v8/src/js/prologue.js36
-rw-r--r--deps/v8/src/js/proxy.js27
-rw-r--r--deps/v8/src/js/v8natives.js64
-rw-r--r--deps/v8/src/json-parser.cc62
-rw-r--r--deps/v8/src/json-parser.h8
-rw-r--r--deps/v8/src/json-stringifier.cc68
-rw-r--r--deps/v8/src/keys.cc25
-rw-r--r--deps/v8/src/label.h5
-rw-r--r--deps/v8/src/layout-descriptor-inl.h5
-rw-r--r--deps/v8/src/layout-descriptor.h2
-rw-r--r--deps/v8/src/libplatform/default-platform.cc6
-rw-r--r--deps/v8/src/libplatform/default-platform.h3
-rw-r--r--deps/v8/src/log-utils.cc5
-rw-r--r--deps/v8/src/log-utils.h6
-rw-r--r--deps/v8/src/log.cc98
-rw-r--r--deps/v8/src/log.h22
-rw-r--r--deps/v8/src/lookup.cc57
-rw-r--r--deps/v8/src/machine-type.h2
-rw-r--r--deps/v8/src/messages.cc54
-rw-r--r--deps/v8/src/messages.h9
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h42
-rw-r--r--deps/v8/src/mips/assembler-mips.cc20
-rw-r--r--deps/v8/src/mips/assembler-mips.h5
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc48
-rw-r--r--deps/v8/src/mips/codegen-mips.cc20
-rw-r--r--deps/v8/src/mips/disasm-mips.cc4
-rw-r--r--deps/v8/src/mips/interface-descriptors-mips.cc7
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc76
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h6
-rw-r--r--deps/v8/src/mips/simulator-mips.cc203
-rw-r--r--deps/v8/src/mips/simulator-mips.h121
-rw-r--r--deps/v8/src/mips64/assembler-mips64-inl.h32
-rw-r--r--deps/v8/src/mips64/assembler-mips64.cc19
-rw-r--r--deps/v8/src/mips64/assembler-mips64.h5
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.cc48
-rw-r--r--deps/v8/src/mips64/codegen-mips64.cc20
-rw-r--r--deps/v8/src/mips64/disasm-mips64.cc10
-rw-r--r--deps/v8/src/mips64/interface-descriptors-mips64.cc7
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.cc68
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.h6
-rw-r--r--deps/v8/src/mips64/simulator-mips64.cc238
-rw-r--r--deps/v8/src/mips64/simulator-mips64.h132
-rw-r--r--deps/v8/src/objects-body-descriptors-inl.h3
-rw-r--r--deps/v8/src/objects-debug.cc81
-rw-r--r--deps/v8/src/objects-inl.h1727
-rw-r--r--deps/v8/src/objects-printer.cc102
-rw-r--r--deps/v8/src/objects.cc613
-rw-r--r--deps/v8/src/objects.h985
-rw-r--r--deps/v8/src/objects/bigint.cc84
-rw-r--r--deps/v8/src/objects/bigint.h25
-rw-r--r--deps/v8/src/objects/code-inl.h34
-rw-r--r--deps/v8/src/objects/code.h21
-rw-r--r--deps/v8/src/objects/data-handler-inl.h41
-rw-r--r--deps/v8/src/objects/data-handler.h63
-rw-r--r--deps/v8/src/objects/debug-objects.h1
-rw-r--r--deps/v8/src/objects/descriptor-array.h1
-rw-r--r--deps/v8/src/objects/fixed-array-inl.h634
-rw-r--r--deps/v8/src/objects/fixed-array.h601
-rw-r--r--deps/v8/src/objects/hash-table-inl.h109
-rw-r--r--deps/v8/src/objects/hash-table.h34
-rw-r--r--deps/v8/src/objects/js-array-inl.h29
-rw-r--r--deps/v8/src/objects/js-array.h4
-rw-r--r--deps/v8/src/objects/js-collection-inl.h49
-rw-r--r--deps/v8/src/objects/js-collection.h162
-rw-r--r--deps/v8/src/objects/js-regexp.h15
-rw-r--r--deps/v8/src/objects/literal-objects.h1
-rw-r--r--deps/v8/src/objects/map-inl.h645
-rw-r--r--deps/v8/src/objects/map.h151
-rw-r--r--deps/v8/src/objects/module.cc179
-rw-r--r--deps/v8/src/objects/module.h16
-rw-r--r--deps/v8/src/objects/object-macros.h3
-rw-r--r--deps/v8/src/objects/scope-info.h5
-rw-r--r--deps/v8/src/objects/script-inl.h35
-rw-r--r--deps/v8/src/objects/script.h23
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h2
-rw-r--r--deps/v8/src/objects/shared-function-info.h18
-rw-r--r--deps/v8/src/objects/string-inl.h31
-rw-r--r--deps/v8/src/objects/string.h7
-rw-r--r--deps/v8/src/ostreams.cc8
-rw-r--r--deps/v8/src/parsing/background-parsing-task.cc3
-rw-r--r--deps/v8/src/parsing/expression-classifier.h25
-rw-r--r--deps/v8/src/parsing/expression-scope-reparenter.cc8
-rw-r--r--deps/v8/src/parsing/parse-info.cc14
-rw-r--r--deps/v8/src/parsing/parse-info.h10
-rw-r--r--deps/v8/src/parsing/parser-base.h387
-rw-r--r--deps/v8/src/parsing/parser.cc350
-rw-r--r--deps/v8/src/parsing/parser.h54
-rw-r--r--deps/v8/src/parsing/pattern-rewriter.cc10
-rw-r--r--deps/v8/src/parsing/preparsed-scope-data.cc6
-rw-r--r--deps/v8/src/parsing/preparser.cc20
-rw-r--r--deps/v8/src/parsing/preparser.h25
-rw-r--r--deps/v8/src/parsing/rewriter.cc4
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.cc57
-rw-r--r--deps/v8/src/parsing/scanner.cc117
-rw-r--r--deps/v8/src/parsing/scanner.h17
-rw-r--r--deps/v8/src/parsing/token.h1
-rw-r--r--deps/v8/src/perf-jit.cc4
-rw-r--r--deps/v8/src/ppc/assembler-ppc-inl.h49
-rw-r--r--deps/v8/src/ppc/assembler-ppc.cc81
-rw-r--r--deps/v8/src/ppc/assembler-ppc.h5
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.cc33
-rw-r--r--deps/v8/src/ppc/codegen-ppc.cc6
-rw-r--r--deps/v8/src/ppc/interface-descriptors-ppc.cc7
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.cc55
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.h6
-rw-r--r--deps/v8/src/ppc/simulator-ppc.cc232
-rw-r--r--deps/v8/src/ppc/simulator-ppc.h111
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc4
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc11
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h3
-rw-r--r--deps/v8/src/profiler/profile-generator.cc8
-rw-r--r--deps/v8/src/profiler/profiler-listener.cc2
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.cc19
-rw-r--r--deps/v8/src/profiler/tick-sample.cc12
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc35
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc36
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc32
-rw-r--r--deps/v8/src/regexp/jsregexp.cc76
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc37
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc37
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc35
-rw-r--r--deps/v8/src/regexp/regexp-ast.h12
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.cc40
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc42
-rw-r--r--deps/v8/src/regexp/regexp-utils.cc2
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc37
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc32
-rw-r--r--deps/v8/src/runtime/runtime-bigint.cc3
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc18
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc21
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc1
-rw-r--r--deps/v8/src/runtime/runtime-function.cc4
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc18
-rw-r--r--deps/v8/src/runtime/runtime-module.cc8
-rw-r--r--deps/v8/src/runtime/runtime-numbers.cc10
-rw-r--r--deps/v8/src/runtime/runtime-object.cc55
-rw-r--r--deps/v8/src/runtime/runtime-promise.cc16
-rw-r--r--deps/v8/src/runtime/runtime-proxy.cc8
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc9
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc4
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc8
-rw-r--r--deps/v8/src/runtime/runtime-test.cc128
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc100
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc20
-rw-r--r--deps/v8/src/runtime/runtime.h40
-rw-r--r--deps/v8/src/s390/assembler-s390-inl.h42
-rw-r--r--deps/v8/src/s390/assembler-s390.cc38
-rw-r--r--deps/v8/src/s390/assembler-s390.h5
-rw-r--r--deps/v8/src/s390/code-stubs-s390.cc34
-rw-r--r--deps/v8/src/s390/codegen-s390.cc6
-rw-r--r--deps/v8/src/s390/constants-s390.cc312
-rw-r--r--deps/v8/src/s390/interface-descriptors-s390.cc8
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.cc55
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.h6
-rw-r--r--deps/v8/src/s390/simulator-s390.cc198
-rw-r--r--deps/v8/src/s390/simulator-s390.h110
-rw-r--r--deps/v8/src/safepoint-table.cc83
-rw-r--r--deps/v8/src/safepoint-table.h31
-rw-r--r--deps/v8/src/simulator-base.cc95
-rw-r--r--deps/v8/src/simulator-base.h163
-rw-r--r--deps/v8/src/simulator.h108
-rw-r--r--deps/v8/src/snapshot/builtin-deserializer-allocator.h2
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc49
-rw-r--r--deps/v8/src/snapshot/code-serializer.h2
-rw-r--r--deps/v8/src/snapshot/default-deserializer-allocator.cc2
-rw-r--r--deps/v8/src/snapshot/default-deserializer-allocator.h2
-rw-r--r--deps/v8/src/snapshot/deserializer.cc56
-rw-r--r--deps/v8/src/snapshot/mksnapshot.cc2
-rw-r--r--deps/v8/src/snapshot/partial-serializer.cc26
-rw-r--r--deps/v8/src/snapshot/partial-serializer.h3
-rw-r--r--deps/v8/src/snapshot/serializer-common.cc11
-rw-r--r--deps/v8/src/snapshot/serializer-common.h3
-rw-r--r--deps/v8/src/snapshot/serializer.cc23
-rw-r--r--deps/v8/src/snapshot/serializer.h2
-rw-r--r--deps/v8/src/snapshot/snapshot-common.cc12
-rw-r--r--deps/v8/src/snapshot/snapshot-empty.cc4
-rw-r--r--deps/v8/src/snapshot/snapshot-source-sink.cc14
-rw-r--r--deps/v8/src/snapshot/snapshot.h2
-rw-r--r--deps/v8/src/snapshot/startup-deserializer.cc2
-rw-r--r--deps/v8/src/snapshot/startup-serializer.cc39
-rw-r--r--deps/v8/src/snapshot/startup-serializer.h14
-rw-r--r--deps/v8/src/string-stream.cc2
-rw-r--r--deps/v8/src/strtod.cc32
-rw-r--r--deps/v8/src/third_party/utf8-decoder/LICENSE19
-rw-r--r--deps/v8/src/third_party/utf8-decoder/README.v818
-rw-r--r--deps/v8/src/third_party/utf8-decoder/utf8-decoder.h78
-rw-r--r--deps/v8/src/tracing/traced-value.cc2
-rw-r--r--deps/v8/src/tracing/tracing-category-observer.cc15
-rw-r--r--deps/v8/src/transitions.cc4
-rw-r--r--deps/v8/src/trap-handler/trap-handler.h6
-rw-r--r--deps/v8/src/type-hints.cc4
-rw-r--r--deps/v8/src/type-hints.h2
-rw-r--r--deps/v8/src/unicode-inl.h4
-rw-r--r--deps/v8/src/unicode.cc325
-rw-r--r--deps/v8/src/unicode.h7
-rw-r--r--deps/v8/src/uri.cc14
-rw-r--r--deps/v8/src/utils.h18
-rw-r--r--deps/v8/src/v8.cc15
-rw-r--r--deps/v8/src/v8.gyp69
-rw-r--r--deps/v8/src/value-serializer.cc46
-rw-r--r--deps/v8/src/vector-slot-pair.cc39
-rw-r--r--deps/v8/src/vector-slot-pair.h47
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm-defs.h23
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h202
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h23
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h202
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h23
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h494
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler-defs.h64
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.cc268
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h348
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc824
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-register.h242
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips-defs.h23
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h202
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h23
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h202
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h23
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h202
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390-defs.h23
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h202
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64-defs.h23
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h476
-rw-r--r--deps/v8/src/wasm/decoder.h31
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h263
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc39
-rw-r--r--deps/v8/src/wasm/function-body-decoder.h17
-rw-r--r--deps/v8/src/wasm/memory-tracing.cc31
-rw-r--r--deps/v8/src/wasm/memory-tracing.h31
-rw-r--r--deps/v8/src/wasm/module-compiler.cc702
-rw-r--r--deps/v8/src/wasm/module-compiler.h20
-rw-r--r--deps/v8/src/wasm/module-decoder.cc42
-rw-r--r--deps/v8/src/wasm/module-decoder.h36
-rw-r--r--deps/v8/src/wasm/streaming-decoder.cc97
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc (renamed from deps/v8/src/wasm/wasm-heap.cc)279
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.h (renamed from deps/v8/src/wasm/wasm-heap.h)68
-rw-r--r--deps/v8/src/wasm/wasm-code-specialization.cc30
-rw-r--r--deps/v8/src/wasm/wasm-code-wrapper.cc33
-rw-r--r--deps/v8/src/wasm/wasm-code-wrapper.h10
-rw-r--r--deps/v8/src/wasm/wasm-constants.h83
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc64
-rw-r--r--deps/v8/src/wasm/wasm-engine.cc23
-rw-r--r--deps/v8/src/wasm/wasm-engine.h46
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.cc26
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.h4
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.cc341
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.h10
-rw-r--r--deps/v8/src/wasm/wasm-js.cc91
-rw-r--r--deps/v8/src/wasm/wasm-limits.h7
-rw-r--r--deps/v8/src/wasm/wasm-memory.cc87
-rw-r--r--deps/v8/src/wasm/wasm-memory.h22
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc34
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h4
-rw-r--r--deps/v8/src/wasm/wasm-module.cc116
-rw-r--r--deps/v8/src/wasm/wasm-module.h41
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h35
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc986
-rw-r--r--deps/v8/src/wasm/wasm-objects.h204
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc65
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h229
-rw-r--r--deps/v8/src/wasm/wasm-serialization.cc183
-rw-r--r--deps/v8/src/wasm/wasm-serialization.h80
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h19
-rw-r--r--deps/v8/src/x64/assembler-x64.cc137
-rw-r--r--deps/v8/src/x64/assembler-x64.h11
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc28
-rw-r--r--deps/v8/src/x64/codegen-x64.cc6
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc26
-rw-r--r--deps/v8/src/x64/disasm-x64.cc150
-rw-r--r--deps/v8/src/x64/interface-descriptors-x64.cc7
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc147
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h13
-rw-r--r--deps/v8/src/x64/simulator-x64.cc2
-rw-r--r--deps/v8/src/x64/simulator-x64.h42
-rw-r--r--deps/v8/src/zone/accounting-allocator.cc8
-rw-r--r--deps/v8/src/zone/zone-containers.h5
-rw-r--r--deps/v8/src/zone/zone.cc9
-rw-r--r--deps/v8/src/zone/zone.h7
672 files changed, 35761 insertions, 26563 deletions
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index adaa0be3c6..eb89288685 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -102,7 +102,7 @@ void Accessors::ReconfigureToDataProperty(
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope stats_scope(
- isolate, &RuntimeCallStats::ReconfigureToDataProperty);
+ isolate, RuntimeCallCounterId::kReconfigureToDataProperty);
HandleScope scope(isolate);
Handle<Object> receiver = Utils::OpenHandle(*info.This());
Handle<JSObject> holder =
@@ -147,7 +147,8 @@ void Accessors::ArrayLengthGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::ArrayLengthGetter);
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kArrayLengthGetter);
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
JSArray* holder = JSArray::cast(*Utils::OpenHandle(*info.Holder()));
@@ -159,7 +160,8 @@ void Accessors::ArrayLengthSetter(
v8::Local<v8::Name> name, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::ArrayLengthSetter);
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kArrayLengthSetter);
HandleScope scope(isolate);
DCHECK(Utils::OpenHandle(*name)->SameValue(isolate->heap()->length_string()));
@@ -272,7 +274,8 @@ void Accessors::StringLengthGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::StringLengthGetter);
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kStringLengthGetter);
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
@@ -546,9 +549,8 @@ void Accessors::ScriptEvalFromScriptGetter(
Handle<Script> script(
Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
Handle<Object> result = isolate->factory()->undefined_value();
- if (!script->eval_from_shared()->IsUndefined(isolate)) {
- Handle<SharedFunctionInfo> eval_from_shared(
- SharedFunctionInfo::cast(script->eval_from_shared()));
+ if (script->has_eval_from_shared()) {
+ Handle<SharedFunctionInfo> eval_from_shared(script->eval_from_shared());
if (eval_from_shared->script()->IsScript()) {
Handle<Script> eval_from_script(Script::cast(eval_from_shared->script()));
result = Script::GetWrapper(eval_from_script);
@@ -608,9 +610,8 @@ void Accessors::ScriptEvalFromFunctionNameGetter(
Handle<Script> script(
Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
Handle<Object> result = isolate->factory()->undefined_value();
- if (!script->eval_from_shared()->IsUndefined(isolate)) {
- Handle<SharedFunctionInfo> shared(
- SharedFunctionInfo::cast(script->eval_from_shared()));
+ if (script->has_eval_from_shared()) {
+ Handle<SharedFunctionInfo> shared(script->eval_from_shared());
// Find the name of the function calling eval.
result = Handle<Object>(shared->name(), isolate);
}
@@ -644,7 +645,7 @@ void Accessors::FunctionPrototypeGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate,
- &RuntimeCallStats::FunctionPrototypeGetter);
+ RuntimeCallCounterId::kFunctionPrototypeGetter);
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -657,7 +658,7 @@ void Accessors::FunctionPrototypeSetter(
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate,
- &RuntimeCallStats::FunctionPrototypeSetter);
+ RuntimeCallCounterId::kFunctionPrototypeSetter);
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);
Handle<JSFunction> object =
@@ -681,7 +682,8 @@ void Accessors::FunctionLengthGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::FunctionLengthGetter);
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kFunctionLengthGetter);
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -950,16 +952,17 @@ class FrameFunctionIterator {
private:
MaybeHandle<JSFunction> next() {
while (true) {
- inlined_frame_index_--;
- if (inlined_frame_index_ == -1) {
+ if (inlined_frame_index_ <= 0) {
if (!frame_iterator_.done()) {
frame_iterator_.Advance();
frames_.clear();
+ inlined_frame_index_ = -1;
GetFrames();
}
if (inlined_frame_index_ == -1) return MaybeHandle<JSFunction>();
- inlined_frame_index_--;
}
+
+ --inlined_frame_index_;
Handle<JSFunction> next_function =
frames_[inlined_frame_index_].AsJavaScript().function();
// Skip functions from other origins.
@@ -1057,7 +1060,7 @@ void Accessors::BoundFunctionLengthGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate,
- &RuntimeCallStats::BoundFunctionLengthGetter);
+ RuntimeCallCounterId::kBoundFunctionLengthGetter);
HandleScope scope(isolate);
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -1084,7 +1087,7 @@ void Accessors::BoundFunctionNameGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate,
- &RuntimeCallStats::BoundFunctionNameGetter);
+ RuntimeCallCounterId::kBoundFunctionNameGetter);
HandleScope scope(isolate);
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
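The accessors.cc hunks above all apply one mechanical migration: RuntimeCallTimerScope is no longer constructed with a pointer to a RuntimeCallStats counter member but with a RuntimeCallCounterId enum value. A minimal before/after sketch of a call site, using the ArrayLengthGetter counter that appears in the patch (the surrounding getter body is elided):

  // Before this patch: the timer scope is keyed by a pointer-to-member.
  RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::ArrayLengthGetter);

  // After this patch: the same scope is keyed by an enum identifier.
  RuntimeCallTimerScope timer(isolate,
                              RuntimeCallCounterId::kArrayLengthGetter);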
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index ab7b33a085..e17de159c1 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -6,7 +6,9 @@
#include <stdlib.h> // For free, malloc.
#include "src/base/bits.h"
+#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
+#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/utils.h"
#include "src/v8.h"
@@ -38,26 +40,44 @@ void* AlignedAllocInternal(size_t size, size_t alignment) {
return ptr;
}
+// TODO(bbudge) Simplify this once all embedders implement a page allocator.
+struct InitializePageAllocator {
+ static void Construct(void* page_allocator_ptr_arg) {
+ auto page_allocator_ptr =
+ reinterpret_cast<v8::PageAllocator**>(page_allocator_ptr_arg);
+ v8::PageAllocator* page_allocator =
+ V8::GetCurrentPlatform()->GetPageAllocator();
+ if (page_allocator == nullptr) {
+ static v8::base::PageAllocator default_allocator;
+ page_allocator = &default_allocator;
+ }
+ *page_allocator_ptr = page_allocator;
+ }
+};
+
+static base::LazyInstance<v8::PageAllocator*, InitializePageAllocator>::type
+ page_allocator = LAZY_INSTANCE_INITIALIZER;
+
+v8::PageAllocator* GetPageAllocator() { return page_allocator.Get(); }
+
+// We will attempt allocation this many times. After each failure, we call
+// OnCriticalMemoryPressure to try to free some memory.
+const int kAllocationTries = 2;
+
} // namespace
void* Malloced::New(size_t size) {
- void* result = malloc(size);
+ void* result = AllocWithRetry(size);
if (result == nullptr) {
- V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
- result = malloc(size);
- if (result == nullptr) {
- V8::FatalProcessOutOfMemory("Malloced operator new");
- }
+ V8::FatalProcessOutOfMemory("Malloced operator new");
}
return result;
}
-
void Malloced::Delete(void* p) {
free(p);
}
-
char* StrDup(const char* str) {
int length = StrLength(str);
char* result = NewArray<char>(length + 1);
@@ -66,7 +86,6 @@ char* StrDup(const char* str) {
return result;
}
-
char* StrNDup(const char* str, int n) {
int length = StrLength(str);
if (n < length) length = n;
@@ -76,22 +95,31 @@ char* StrNDup(const char* str, int n) {
return result;
}
+void* AllocWithRetry(size_t size) {
+ void* result = nullptr;
+ for (int i = 0; i < kAllocationTries; ++i) {
+ result = malloc(size);
+ if (result != nullptr) break;
+ if (!OnCriticalMemoryPressure(size)) break;
+ }
+ return result;
+}
void* AlignedAlloc(size_t size, size_t alignment) {
DCHECK_LE(V8_ALIGNOF(void*), alignment);
DCHECK(base::bits::IsPowerOfTwo(alignment));
- void* ptr = AlignedAllocInternal(size, alignment);
- if (ptr == nullptr) {
- V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
- ptr = AlignedAllocInternal(size, alignment);
- if (ptr == nullptr) {
- V8::FatalProcessOutOfMemory("AlignedAlloc");
- }
+ void* result = nullptr;
+ for (int i = 0; i < kAllocationTries; ++i) {
+ result = AlignedAllocInternal(size, alignment);
+ if (result != nullptr) break;
+ if (!OnCriticalMemoryPressure(size + alignment)) break;
}
- return ptr;
+ if (result == nullptr) {
+ V8::FatalProcessOutOfMemory("AlignedAlloc");
+ }
+ return result;
}
-
void AlignedFree(void *ptr) {
#if V8_OS_WIN
_aligned_free(ptr);
@@ -103,27 +131,88 @@ void AlignedFree(void *ptr) {
#endif
}
-byte* AllocateSystemPage(void* address, size_t* allocated) {
- size_t page_size = base::OS::AllocatePageSize();
- void* result = base::OS::Allocate(address, page_size, page_size,
- base::OS::MemoryPermission::kReadWrite);
+size_t AllocatePageSize() { return GetPageAllocator()->AllocatePageSize(); }
+
+size_t CommitPageSize() { return GetPageAllocator()->CommitPageSize(); }
+
+void SetRandomMmapSeed(int64_t seed) {
+ GetPageAllocator()->SetRandomMmapSeed(seed);
+}
+
+void* GetRandomMmapAddr() { return GetPageAllocator()->GetRandomMmapAddr(); }
+
+void* AllocatePages(void* address, size_t size, size_t alignment,
+ PageAllocator::Permission access) {
+ void* result = nullptr;
+ for (int i = 0; i < kAllocationTries; ++i) {
+ result =
+ GetPageAllocator()->AllocatePages(address, size, alignment, access);
+ if (result != nullptr) break;
+ size_t request_size = size + alignment - AllocatePageSize();
+ if (!OnCriticalMemoryPressure(request_size)) break;
+ }
+#if defined(LEAK_SANITIZER)
+ if (result != nullptr) {
+ __lsan_register_root_region(result, size);
+ }
+#endif
+ return result;
+}
+
+bool FreePages(void* address, const size_t size) {
+ bool result = GetPageAllocator()->FreePages(address, size);
+#if defined(LEAK_SANITIZER)
+ if (result) {
+ __lsan_unregister_root_region(address, size);
+ }
+#endif
+ return result;
+}
+
+bool ReleasePages(void* address, size_t size, size_t new_size) {
+ DCHECK_LT(new_size, size);
+ bool result = GetPageAllocator()->ReleasePages(address, size, new_size);
+#if defined(LEAK_SANITIZER)
+ if (result) {
+ __lsan_unregister_root_region(address, size);
+ __lsan_register_root_region(address, new_size);
+ }
+#endif
+ return result;
+}
+
+bool SetPermissions(void* address, size_t size,
+ PageAllocator::Permission access) {
+ return GetPageAllocator()->SetPermissions(address, size, access);
+}
+
+byte* AllocatePage(void* address, size_t* allocated) {
+ size_t page_size = AllocatePageSize();
+ void* result =
+ AllocatePages(address, page_size, page_size, PageAllocator::kReadWrite);
if (result != nullptr) *allocated = page_size;
return static_cast<byte*>(result);
}
+bool OnCriticalMemoryPressure(size_t length) {
+ // TODO(bbudge) Rework retry logic once embedders implement the more
+ // informative overload.
+ if (!V8::GetCurrentPlatform()->OnCriticalMemoryPressure(length)) {
+ V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
+ }
+ return true;
+}
+
VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}
VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment)
: address_(nullptr), size_(0) {
- size_t page_size = base::OS::AllocatePageSize();
+ size_t page_size = AllocatePageSize();
size_t alloc_size = RoundUp(size, page_size);
- address_ = base::OS::Allocate(hint, alloc_size, alignment,
- base::OS::MemoryPermission::kNoAccess);
+ address_ =
+ AllocatePages(hint, alloc_size, alignment, PageAllocator::kNoAccess);
if (address_ != nullptr) {
size_ = alloc_size;
-#if defined(LEAK_SANITIZER)
- __lsan_register_root_region(address_, size_);
-#endif
}
}
@@ -139,9 +228,9 @@ void VirtualMemory::Reset() {
}
bool VirtualMemory::SetPermissions(void* address, size_t size,
- base::OS::MemoryPermission access) {
+ PageAllocator::Permission access) {
CHECK(InVM(address, size));
- bool result = base::OS::SetPermissions(address, size, access);
+ bool result = v8::internal::SetPermissions(address, size, access);
DCHECK(result);
USE(result);
return result;
@@ -149,8 +238,7 @@ bool VirtualMemory::SetPermissions(void* address, size_t size,
size_t VirtualMemory::Release(void* free_start) {
DCHECK(IsReserved());
- DCHECK(IsAddressAligned(static_cast<Address>(free_start),
- base::OS::CommitPageSize()));
+ DCHECK(IsAddressAligned(static_cast<Address>(free_start), CommitPageSize()));
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
const size_t free_size = size_ - (reinterpret_cast<size_t>(free_start) -
@@ -159,11 +247,7 @@ size_t VirtualMemory::Release(void* free_start) {
DCHECK_LT(address_, free_start);
DCHECK_LT(free_start, reinterpret_cast<void*>(
reinterpret_cast<size_t>(address_) + size_));
-#if defined(LEAK_SANITIZER)
- __lsan_unregister_root_region(address_, size_);
- __lsan_register_root_region(address_, size_ - free_size);
-#endif
- CHECK(base::OS::Release(free_start, free_size));
+ CHECK(ReleasePages(address_, size_, size_ - free_size));
size_ -= free_size;
return free_size;
}
@@ -176,10 +260,7 @@ void VirtualMemory::Free() {
size_t size = size_;
CHECK(InVM(address, size));
Reset();
-#if defined(LEAK_SANITIZER)
- __lsan_unregister_root_region(address, size);
-#endif
- CHECK(base::OS::Free(address, size));
+ CHECK(FreePages(address, size));
}
void VirtualMemory::TakeControl(VirtualMemory* from) {
@@ -190,30 +271,22 @@ void VirtualMemory::TakeControl(VirtualMemory* from) {
}
bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
- VirtualMemory first_try(size, hint);
- if (first_try.IsReserved()) {
- result->TakeControl(&first_try);
+ VirtualMemory vm(size, hint);
+ if (vm.IsReserved()) {
+ result->TakeControl(&vm);
return true;
}
-
- V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
- VirtualMemory second_try(size, hint);
- result->TakeControl(&second_try);
- return result->IsReserved();
+ return false;
}
bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
VirtualMemory* result) {
- VirtualMemory first_try(size, hint, alignment);
- if (first_try.IsReserved()) {
- result->TakeControl(&first_try);
+ VirtualMemory vm(size, hint, alignment);
+ if (vm.IsReserved()) {
+ result->TakeControl(&vm);
return true;
}
-
- V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
- VirtualMemory second_try(size, hint, alignment);
- result->TakeControl(&second_try);
- return result->IsReserved();
+ return false;
}
} // namespace internal
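Two related changes in allocation.cc read best together: retry-on-failure now lives in AllocWithRetry and AllocatePages (bounded by kAllocationTries, with OnCriticalMemoryPressure invoked between attempts), so AllocVirtualMemory and AlignedAllocVirtualMemory drop their explicit second attempt. A sketch of the resulting call chain for a virtual-memory reservation, following the code above (illustrative summary, not part of the patch):

  // AllocVirtualMemory(size, hint, result)
  //   -> VirtualMemory vm(size, hint)        // rounds size up to AllocatePageSize()
  //     -> AllocatePages(hint, alloc_size, alignment, PageAllocator::kNoAccess)
  //        // up to kAllocationTries (2) attempts; after a failed attempt,
  //        // OnCriticalMemoryPressure(request_size) asks the embedder to
  //        // release memory before retrying
  //   -> result->TakeControl(&vm) on success, otherwise return false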
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index 668a0e6037..9bb47c8f05 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -72,14 +72,68 @@ class FreeStoreAllocationPolicy {
INLINE(static void Delete(void* p)) { Malloced::Delete(p); }
};
+// Performs a malloc, with retry logic on failure. Returns nullptr on failure.
+// Call free to release memory allocated with this function.
+void* AllocWithRetry(size_t size);
void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void *ptr);
-// Allocates a single system memory page with read/write permissions. The
-// address parameter is a hint. Returns the base address of the memory, or null
-// on failure. Permissions can be changed on the base address.
-byte* AllocateSystemPage(void* address, size_t* allocated);
+// Gets the page granularity for AllocatePages and FreePages. Addresses returned
+// by AllocatePages and AllocatePage are aligned to this size.
+V8_EXPORT_PRIVATE size_t AllocatePageSize();
+
+// Gets the granularity at which the permissions and release calls can be made.
+V8_EXPORT_PRIVATE size_t CommitPageSize();
+
+// Sets the random seed so that GetRandomMmapAddr() will generate repeatable
+// sequences of random mmap addresses.
+V8_EXPORT_PRIVATE void SetRandomMmapSeed(int64_t seed);
+
+// Generate a random address to be used for hinting allocation calls.
+V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
+
+// Allocates memory. Permissions are set according to the access argument.
+// |address| is a hint. |size| and |alignment| must be multiples of
+// AllocatePageSize(). Returns the address of the allocated memory, with the
+// specified size and alignment, or nullptr on failure.
+V8_EXPORT_PRIVATE
+V8_WARN_UNUSED_RESULT void* AllocatePages(void* address, size_t size,
+ size_t alignment,
+ PageAllocator::Permission access);
+
+// Frees memory allocated by a call to AllocatePages. |address| and |size| must
+// be multiples of AllocatePageSize(). Returns true on success, otherwise false.
+V8_EXPORT_PRIVATE
+V8_WARN_UNUSED_RESULT bool FreePages(void* address, const size_t size);
+
+// Releases memory that is no longer needed. The range specified by |address|
+// and |size| must be an allocated memory region. |size| and |new_size| must be
+// multiples of CommitPageSize(). Memory from |new_size| to |size| is released.
+// Released memory is left in an undefined state, so it should not be accessed.
+// Returns true on success, otherwise false.
+V8_EXPORT_PRIVATE
+V8_WARN_UNUSED_RESULT bool ReleasePages(void* address, size_t size,
+ size_t new_size);
+
+// Sets permissions according to |access|. |address| and |size| must be
+// multiples of CommitPageSize(). Setting permission to kNoAccess may
+// cause the memory contents to be lost. Returns true on success, otherwise
+// false.
+V8_EXPORT_PRIVATE
+V8_WARN_UNUSED_RESULT bool SetPermissions(void* address, size_t size,
+ PageAllocator::Permission access);
+
+// Convenience function that allocates a single system page with read and write
+// permissions. |address| is a hint. Returns the base address of the memory and
+// the page size via |allocated| on success. Returns nullptr on failure.
+V8_EXPORT_PRIVATE
+V8_WARN_UNUSED_RESULT byte* AllocatePage(void* address, size_t* allocated);
+
+// Function that may release reserved memory regions to allow failed allocations
+// to succeed. |length| is the amount of memory needed. Returns |true| if memory
+// could be released, false otherwise.
+V8_EXPORT_PRIVATE bool OnCriticalMemoryPressure(size_t length);
// Represents and controls an area of reserved memory.
class V8_EXPORT_PRIVATE VirtualMemory {
@@ -90,8 +144,7 @@ class V8_EXPORT_PRIVATE VirtualMemory {
// Reserves virtual memory containing an area of the given size that is
// aligned per alignment. This may not be at the position returned by
// address().
- VirtualMemory(size_t size, void* hint,
- size_t alignment = base::OS::AllocatePageSize());
+ VirtualMemory(size_t size, void* hint, size_t alignment = AllocatePageSize());
// Construct a virtual memory by assigning it some already mapped address
// and size.
@@ -131,7 +184,7 @@ class V8_EXPORT_PRIVATE VirtualMemory {
// Sets permissions according to the access argument. address and size must be
// multiples of CommitPageSize(). Returns true on success, otherwise false.
bool SetPermissions(void* address, size_t size,
- base::OS::MemoryPermission access);
+ PageAllocator::Permission access);
// Releases memory after |free_start|. Returns the number of bytes released.
size_t Release(void* free_start);
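A minimal usage sketch of the page API declared above. The function names come from the header; the single-page size and the read/write transition are illustrative only, and CHECK is the macro from src/base/logging.h:

  size_t page_size = v8::internal::AllocatePageSize();
  // Reserve one page of inaccessible address space near a randomized hint.
  void* mem = v8::internal::AllocatePages(v8::internal::GetRandomMmapAddr(),
                                          page_size, page_size,
                                          v8::PageAllocator::kNoAccess);
  if (mem != nullptr) {
    // Commit the page for read/write use.
    CHECK(v8::internal::SetPermissions(mem, page_size,
                                       v8::PageAllocator::kReadWrite));
    // ... use the memory ...
    CHECK(v8::internal::FreePages(mem, page_size));
  }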
diff --git a/deps/v8/src/api-arguments-inl.h b/deps/v8/src/api-arguments-inl.h
index 4035e715c1..b8336f97c4 100644
--- a/deps/v8/src/api-arguments-inl.h
+++ b/deps/v8/src/api-arguments-inl.h
@@ -13,146 +13,248 @@
namespace v8 {
namespace internal {
-#define SIDE_EFFECT_CHECK(ISOLATE, F, RETURN_TYPE) \
- do { \
- if (ISOLATE->needs_side_effect_check() && \
- !PerformSideEffectCheck(ISOLATE, FUNCTION_ADDR(F))) { \
- return Handle<RETURN_TYPE>(); \
- } \
- } while (false)
-
-#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(F) \
- F(AccessorNameGetterCallback, "get", v8::Value, Object) \
- F(GenericNamedPropertyQueryCallback, "has", v8::Integer, Object) \
- F(GenericNamedPropertyDeleterCallback, "delete", v8::Boolean, Object)
-
-#define WRITE_CALL_1_NAME(Function, type, ApiReturn, InternalReturn) \
- Handle<InternalReturn> PropertyCallbackArguments::Call(Function f, \
- Handle<Name> name) { \
+#define FOR_EACH_CALLBACK(F) \
+ F(Query, query, Object, v8::Integer) \
+ F(Deleter, deleter, Object, v8::Boolean)
+
+#define PREPARE_CALLBACK_INFO(ISOLATE, F, RETURN_VALUE, API_RETURN_TYPE) \
+ if (ISOLATE->needs_side_effect_check() && \
+ !PerformSideEffectCheck(ISOLATE, FUNCTION_ADDR(F))) { \
+ return RETURN_VALUE(); \
+ } \
+ VMState<EXTERNAL> state(ISOLATE); \
+ ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \
+ PropertyCallbackInfo<API_RETURN_TYPE> callback_info(begin());
+
+#define CREATE_NAMED_CALLBACK(Function, type, ReturnType, ApiReturnType) \
+ Handle<ReturnType> PropertyCallbackArguments::CallNamed##Function( \
+ Handle<InterceptorInfo> interceptor, Handle<Name> name) { \
+ DCHECK(interceptor->is_named()); \
+ DCHECK(!name->IsPrivate()); \
+ DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols()); \
Isolate* isolate = this->isolate(); \
- SIDE_EFFECT_CHECK(isolate, f, InternalReturn); \
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function); \
- VMState<EXTERNAL> state(isolate); \
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ApiReturn> info(begin()); \
+ RuntimeCallTimerScope timer( \
+ isolate, RuntimeCallCounterId::kNamed##Function##Callback); \
+ DCHECK(!name->IsPrivate()); \
+ GenericNamedProperty##Function##Callback f = \
+ ToCData<GenericNamedProperty##Function##Callback>( \
+ interceptor->type()); \
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<ReturnType>, ApiReturnType); \
LOG(isolate, \
- ApiNamedPropertyAccess("interceptor-named-" type, holder(), *name)); \
- f(v8::Utils::ToLocal(name), info); \
- return GetReturnValue<InternalReturn>(isolate); \
+ ApiNamedPropertyAccess("interceptor-named-" #type, holder(), *name)); \
+ f(v8::Utils::ToLocal(name), callback_info); \
+ return GetReturnValue<ReturnType>(isolate); \
}
-FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(WRITE_CALL_1_NAME)
-
-#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME
-#undef WRITE_CALL_1_NAME
+FOR_EACH_CALLBACK(CREATE_NAMED_CALLBACK)
+#undef CREATE_NAMED_CALLBACK
-#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(F) \
- F(IndexedPropertyGetterCallback, "get", v8::Value, Object) \
- F(IndexedPropertyQueryCallback, "has", v8::Integer, Object) \
- F(IndexedPropertyDeleterCallback, "delete", v8::Boolean, Object)
-
-#define WRITE_CALL_1_INDEX(Function, type, ApiReturn, InternalReturn) \
- Handle<InternalReturn> PropertyCallbackArguments::Call(Function f, \
- uint32_t index) { \
+#define CREATE_INDEXED_CALLBACK(Function, type, ReturnType, ApiReturnType) \
+ Handle<ReturnType> PropertyCallbackArguments::CallIndexed##Function( \
+ Handle<InterceptorInfo> interceptor, uint32_t index) { \
+ DCHECK(!interceptor->is_named()); \
Isolate* isolate = this->isolate(); \
- SIDE_EFFECT_CHECK(isolate, f, InternalReturn); \
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function); \
- VMState<EXTERNAL> state(isolate); \
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ApiReturn> info(begin()); \
- LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" type, \
+ RuntimeCallTimerScope timer( \
+ isolate, RuntimeCallCounterId::kIndexed##Function##Callback); \
+ IndexedProperty##Function##Callback f = \
+ ToCData<IndexedProperty##Function##Callback>(interceptor->type()); \
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<ReturnType>, ApiReturnType); \
+ LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" #type, \
holder(), index)); \
- f(index, info); \
- return GetReturnValue<InternalReturn>(isolate); \
+ f(index, callback_info); \
+ return GetReturnValue<ReturnType>(isolate); \
}
-FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(WRITE_CALL_1_INDEX)
+FOR_EACH_CALLBACK(CREATE_INDEXED_CALLBACK)
+
+#undef FOR_EACH_CALLBACK
+#undef CREATE_INDEXED_CALLBACK
+
+Handle<Object> PropertyCallbackArguments::CallNamedGetter(
+ Handle<InterceptorInfo> interceptor, Handle<Name> name) {
+ DCHECK(interceptor->is_named());
+ DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
+ DCHECK(!name->IsPrivate());
+ Isolate* isolate = this->isolate();
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kNamedGetterCallback);
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-getter", holder(), *name));
+ GenericNamedPropertyGetterCallback f =
+ ToCData<GenericNamedPropertyGetterCallback>(interceptor->getter());
+ return BasicCallNamedGetterCallback(f, name);
+}
+
+Handle<Object> PropertyCallbackArguments::CallNamedDescriptor(
+ Handle<InterceptorInfo> interceptor, Handle<Name> name) {
+ DCHECK(interceptor->is_named());
+ DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
+ Isolate* isolate = this->isolate();
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kNamedDescriptorCallback);
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-descriptor", holder(), *name));
+ GenericNamedPropertyDescriptorCallback f =
+ ToCData<GenericNamedPropertyDescriptorCallback>(
+ interceptor->descriptor());
+ return BasicCallNamedGetterCallback(f, name);
+}
+
+Handle<Object> PropertyCallbackArguments::BasicCallNamedGetterCallback(
+ GenericNamedPropertyGetterCallback f, Handle<Name> name) {
+ DCHECK(!name->IsPrivate());
+ Isolate* isolate = this->isolate();
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
+ f(v8::Utils::ToLocal(name), callback_info);
+ return GetReturnValue<Object>(isolate);
+}
-#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX
-#undef WRITE_CALL_1_INDEX
+Handle<Object> PropertyCallbackArguments::CallNamedSetter(
+ Handle<InterceptorInfo> interceptor, Handle<Name> name,
+ Handle<Object> value) {
+ DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
+ GenericNamedPropertySetterCallback f =
+ ToCData<GenericNamedPropertySetterCallback>(interceptor->setter());
+ return CallNamedSetterCallback(f, name, value);
+}
-Handle<Object> PropertyCallbackArguments::Call(
+Handle<Object> PropertyCallbackArguments::CallNamedSetterCallback(
GenericNamedPropertySetterCallback f, Handle<Name> name,
Handle<Object> value) {
+ DCHECK(!name->IsPrivate());
Isolate* isolate = this->isolate();
- SIDE_EFFECT_CHECK(isolate, f, Object);
- RuntimeCallTimerScope timer(
- isolate, &RuntimeCallStats::GenericNamedPropertySetterCallback);
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- PropertyCallbackInfo<v8::Value> info(begin());
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kNamedSetterCallback);
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
- f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
+ f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), callback_info);
return GetReturnValue<Object>(isolate);
}
-Handle<Object> PropertyCallbackArguments::Call(
- GenericNamedPropertyDefinerCallback f, Handle<Name> name,
+Handle<Object> PropertyCallbackArguments::CallNamedDefiner(
+ Handle<InterceptorInfo> interceptor, Handle<Name> name,
const v8::PropertyDescriptor& desc) {
+ DCHECK(interceptor->is_named());
+ DCHECK(!name->IsPrivate());
+ DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
Isolate* isolate = this->isolate();
- SIDE_EFFECT_CHECK(isolate, f, Object);
- RuntimeCallTimerScope timer(
- isolate, &RuntimeCallStats::GenericNamedPropertyDefinerCallback);
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- PropertyCallbackInfo<v8::Value> info(begin());
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kNamedDefinerCallback);
+ GenericNamedPropertyDefinerCallback f =
+ ToCData<GenericNamedPropertyDefinerCallback>(interceptor->definer());
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-define", holder(), *name));
- f(v8::Utils::ToLocal(name), desc, info);
+ f(v8::Utils::ToLocal(name), desc, callback_info);
return GetReturnValue<Object>(isolate);
}
-Handle<Object> PropertyCallbackArguments::Call(IndexedPropertySetterCallback f,
- uint32_t index,
- Handle<Object> value) {
+Handle<Object> PropertyCallbackArguments::CallIndexedSetter(
+ Handle<InterceptorInfo> interceptor, uint32_t index, Handle<Object> value) {
+ DCHECK(!interceptor->is_named());
Isolate* isolate = this->isolate();
- SIDE_EFFECT_CHECK(isolate, f, Object);
RuntimeCallTimerScope timer(isolate,
- &RuntimeCallStats::IndexedPropertySetterCallback);
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- PropertyCallbackInfo<v8::Value> info(begin());
+ RuntimeCallCounterId::kIndexedSetterCallback);
+ IndexedPropertySetterCallback f =
+ ToCData<IndexedPropertySetterCallback>(interceptor->setter());
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-set", holder(), index));
- f(index, v8::Utils::ToLocal(value), info);
+ f(index, v8::Utils::ToLocal(value), callback_info);
return GetReturnValue<Object>(isolate);
}
-Handle<Object> PropertyCallbackArguments::Call(
- IndexedPropertyDefinerCallback f, uint32_t index,
+Handle<Object> PropertyCallbackArguments::CallIndexedDefiner(
+ Handle<InterceptorInfo> interceptor, uint32_t index,
const v8::PropertyDescriptor& desc) {
+ DCHECK(!interceptor->is_named());
Isolate* isolate = this->isolate();
- SIDE_EFFECT_CHECK(isolate, f, Object);
- RuntimeCallTimerScope timer(
- isolate, &RuntimeCallStats::IndexedPropertyDefinerCallback);
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- PropertyCallbackInfo<v8::Value> info(begin());
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kIndexedDefinerCallback);
+ IndexedPropertyDefinerCallback f =
+ ToCData<IndexedPropertyDefinerCallback>(interceptor->definer());
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-define", holder(), index));
- f(index, desc, info);
+ f(index, desc, callback_info);
return GetReturnValue<Object>(isolate);
}
-void PropertyCallbackArguments::Call(AccessorNameSetterCallback f,
- Handle<Name> name, Handle<Object> value) {
+Handle<Object> PropertyCallbackArguments::CallIndexedGetter(
+ Handle<InterceptorInfo> interceptor, uint32_t index) {
+ DCHECK(!interceptor->is_named());
Isolate* isolate = this->isolate();
- if (isolate->needs_side_effect_check() &&
- !PerformSideEffectCheck(isolate, FUNCTION_ADDR(f))) {
- return;
- }
RuntimeCallTimerScope timer(isolate,
- &RuntimeCallStats::AccessorNameSetterCallback);
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- PropertyCallbackInfo<void> info(begin());
+ RuntimeCallCounterId::kNamedGetterCallback);
LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
- f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
+ ApiIndexedPropertyAccess("interceptor-indexed-getter", holder(), index));
+ IndexedPropertyGetterCallback f =
+ ToCData<IndexedPropertyGetterCallback>(interceptor->getter());
+ return BasicCallIndexedGetterCallback(f, index);
+}
+
+Handle<Object> PropertyCallbackArguments::CallIndexedDescriptor(
+ Handle<InterceptorInfo> interceptor, uint32_t index) {
+ DCHECK(!interceptor->is_named());
+ Isolate* isolate = this->isolate();
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kIndexedDescriptorCallback);
+ LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-descriptor",
+ holder(), index));
+ IndexedPropertyDescriptorCallback f =
+ ToCData<IndexedPropertyDescriptorCallback>(interceptor->descriptor());
+ return BasicCallIndexedGetterCallback(f, index);
+}
+
+Handle<Object> PropertyCallbackArguments::BasicCallIndexedGetterCallback(
+ IndexedPropertyGetterCallback f, uint32_t index) {
+ Isolate* isolate = this->isolate();
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
+ f(index, callback_info);
+ return GetReturnValue<Object>(isolate);
+}
+
+Handle<JSObject> PropertyCallbackArguments::CallPropertyEnumerator(
+ Handle<InterceptorInfo> interceptor) {
+ // For now there is a single enumerator for indexed and named properties.
+ IndexedPropertyEnumeratorCallback f =
+ v8::ToCData<IndexedPropertyEnumeratorCallback>(interceptor->enumerator());
+ // TODO(cbruni): assert same type for indexed and named callback.
+ Isolate* isolate = this->isolate();
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<JSObject>, v8::Array);
+ f(callback_info);
+ return GetReturnValue<JSObject>(isolate);
+}
+
+// -------------------------------------------------------------------------
+// Accessors
+
+Handle<Object> PropertyCallbackArguments::CallAccessorGetter(
+ Handle<AccessorInfo> info, Handle<Name> name) {
+ Isolate* isolate = this->isolate();
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kAccessorGetterCallback);
+ LOG(isolate, ApiNamedPropertyAccess("accessor-getter", holder(), *name));
+ AccessorNameGetterCallback f =
+ ToCData<AccessorNameGetterCallback>(info->getter());
+ return BasicCallNamedGetterCallback(f, name);
+}
+
+void PropertyCallbackArguments::CallAccessorSetter(
+ Handle<AccessorInfo> accessor_info, Handle<Name> name,
+ Handle<Object> value) {
+ Isolate* isolate = this->isolate();
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kAccessorSetterCallback);
+ AccessorNameSetterCallback f =
+ ToCData<AccessorNameSetterCallback>(accessor_info->setter());
+ PREPARE_CALLBACK_INFO(isolate, f, void, void);
+ LOG(isolate, ApiNamedPropertyAccess("accessor-setter", holder(), *name));
+ f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), callback_info);
}
-#undef SIDE_EFFECT_CHECK
+#undef PREPARE_CALLBACK_INFO
} // namespace internal
} // namespace v8
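
The PREPARE_CALLBACK_INFO macro above replaces the old SIDE_EFFECT_CHECK plus the hand-written VMState/ExternalCallbackScope/PropertyCallbackInfo boilerplate. As an illustrative sketch (not the literal preprocessor output), CREATE_NAMED_CALLBACK(Query, query, Object, v8::Integer) expands to roughly:

// Rough expansion of CREATE_NAMED_CALLBACK(Query, query, Object, v8::Integer);
// illustrative only, the real code is produced by the macros above.
Handle<Object> PropertyCallbackArguments::CallNamedQuery(
    Handle<InterceptorInfo> interceptor, Handle<Name> name) {
  DCHECK(interceptor->is_named());
  DCHECK(!name->IsPrivate());
  DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
  Isolate* isolate = this->isolate();
  RuntimeCallTimerScope timer(isolate,
                              RuntimeCallCounterId::kNamedQueryCallback);
  GenericNamedPropertyQueryCallback f =
      ToCData<GenericNamedPropertyQueryCallback>(interceptor->query());
  // PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Integer):
  if (isolate->needs_side_effect_check() &&
      !PerformSideEffectCheck(isolate, FUNCTION_ADDR(f))) {
    return Handle<Object>();
  }
  VMState<EXTERNAL> state(isolate);
  ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
  PropertyCallbackInfo<v8::Integer> callback_info(begin());
  LOG(isolate,
      ApiNamedPropertyAccess("interceptor-named-query", holder(), *name));
  f(v8::Utils::ToLocal(name), callback_info);
  return GetReturnValue<Object>(isolate);
}
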
diff --git a/deps/v8/src/api-arguments.cc b/deps/v8/src/api-arguments.cc
index c7c54e5de1..1302e32b66 100644
--- a/deps/v8/src/api-arguments.cc
+++ b/deps/v8/src/api-arguments.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/api-arguments.h"
+#include "src/api-arguments-inl.h"
#include "src/debug/debug.h"
#include "src/objects-inl.h"
@@ -18,7 +19,7 @@ Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
!isolate->debug()->PerformSideEffectCheckForCallback(FUNCTION_ADDR(f))) {
return Handle<Object>();
}
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::FunctionCallback);
+ RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_);
@@ -26,19 +27,22 @@ Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
return GetReturnValue<Object>(isolate);
}
-Handle<JSObject> PropertyCallbackArguments::Call(
- IndexedPropertyEnumeratorCallback f) {
- Isolate* isolate = this->isolate();
- if (isolate->needs_side_effect_check() &&
- !isolate->debug()->PerformSideEffectCheckForCallback(FUNCTION_ADDR(f))) {
- return Handle<JSObject>();
- }
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::PropertyCallback);
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- PropertyCallbackInfo<v8::Array> info(begin());
- f(info);
- return GetReturnValue<JSObject>(isolate);
+Handle<JSObject> PropertyCallbackArguments::CallNamedEnumerator(
+ Handle<InterceptorInfo> interceptor) {
+ DCHECK(interceptor->is_named());
+ LOG(isolate(), ApiObjectAccess("interceptor-named-enumerator", holder()));
+ RuntimeCallTimerScope timer(isolate(),
+ RuntimeCallCounterId::kNamedEnumeratorCallback);
+ return CallPropertyEnumerator(interceptor);
+}
+
+Handle<JSObject> PropertyCallbackArguments::CallIndexedEnumerator(
+ Handle<InterceptorInfo> interceptor) {
+ DCHECK(!interceptor->is_named());
+ LOG(isolate(), ApiObjectAccess("interceptor-indexed-enumerator", holder()));
+ RuntimeCallTimerScope timer(isolate(),
+ RuntimeCallCounterId::kIndexedEnumeratorCallback);
+ return CallPropertyEnumerator(interceptor);
}
bool PropertyCallbackArguments::PerformSideEffectCheck(Isolate* isolate,
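
CallNamedEnumerator and CallIndexedEnumerator differ only in their DCHECK, log label, and runtime-call counter; both funnel into the shared CallPropertyEnumerator helper defined in api-arguments-inl.h. A hypothetical call-site sketch (for illustration only; the real callers live in the property-lookup machinery):

// Hypothetical call-site sketch -- not part of this file.
Handle<JSObject> GetInterceptorKeys(PropertyCallbackArguments& args,
                                    Handle<InterceptorInfo> interceptor) {
  // Named and indexed interceptors now go through distinct, self-describing
  // entry points instead of a single overloaded Call().
  return interceptor->is_named() ? args.CallNamedEnumerator(interceptor)
                                 : args.CallIndexedEnumerator(interceptor);
}
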
diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api-arguments.h
index 179d787941..42d58b88a8 100644
--- a/deps/v8/src/api-arguments.h
+++ b/deps/v8/src/api-arguments.h
@@ -99,6 +99,54 @@ class PropertyCallbackArguments
DCHECK(values[T::kIsolateIndex]->IsSmi());
}
+ // -------------------------------------------------------------------------
+ // Accessor Callbacks
+ // Also used for AccessorSetterCallback.
+ inline void CallAccessorSetter(Handle<AccessorInfo> info, Handle<Name> name,
+ Handle<Object> value);
+ // Also used for AccessorGetterCallback, AccessorNameGetterCallback.
+ inline Handle<Object> CallAccessorGetter(Handle<AccessorInfo> info,
+ Handle<Name> name);
+
+ // -------------------------------------------------------------------------
+ // Named Interceptor Callbacks
+ inline Handle<Object> CallNamedQuery(Handle<InterceptorInfo> interceptor,
+ Handle<Name> name);
+ inline Handle<Object> CallNamedGetter(Handle<InterceptorInfo> interceptor,
+ Handle<Name> name);
+ inline Handle<Object> CallNamedSetter(Handle<InterceptorInfo> interceptor,
+ Handle<Name> name,
+ Handle<Object> value);
+ inline Handle<Object> CallNamedSetterCallback(
+ GenericNamedPropertySetterCallback callback, Handle<Name> name,
+ Handle<Object> value);
+ inline Handle<Object> CallNamedDefiner(Handle<InterceptorInfo> interceptor,
+ Handle<Name> name,
+ const v8::PropertyDescriptor& desc);
+ inline Handle<Object> CallNamedDeleter(Handle<InterceptorInfo> interceptor,
+ Handle<Name> name);
+ inline Handle<Object> CallNamedDescriptor(Handle<InterceptorInfo> interceptor,
+ Handle<Name> name);
+ Handle<JSObject> CallNamedEnumerator(Handle<InterceptorInfo> interceptor);
+
+ // -------------------------------------------------------------------------
+ // Indexed Interceptor Callbacks
+ inline Handle<Object> CallIndexedQuery(Handle<InterceptorInfo> interceptor,
+ uint32_t index);
+ inline Handle<Object> CallIndexedGetter(Handle<InterceptorInfo> interceptor,
+ uint32_t index);
+ inline Handle<Object> CallIndexedSetter(Handle<InterceptorInfo> interceptor,
+ uint32_t index, Handle<Object> value);
+ inline Handle<Object> CallIndexedDefiner(Handle<InterceptorInfo> interceptor,
+ uint32_t index,
+ const v8::PropertyDescriptor& desc);
+ inline Handle<Object> CallIndexedDeleter(Handle<InterceptorInfo> interceptor,
+ uint32_t index);
+ inline Handle<Object> CallIndexedDescriptor(
+ Handle<InterceptorInfo> interceptor, uint32_t index);
+ Handle<JSObject> CallIndexedEnumerator(Handle<InterceptorInfo> interceptor);
+
+ private:
/*
* The following Call functions wrap the calling of all callbacks to handle
* calling either the old or the new style callbacks depending on which one
@@ -107,35 +155,14 @@ class PropertyCallbackArguments
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
- Handle<JSObject> Call(IndexedPropertyEnumeratorCallback f);
-
- inline Handle<Object> Call(AccessorNameGetterCallback f, Handle<Name> name);
- inline Handle<Object> Call(GenericNamedPropertyQueryCallback f,
- Handle<Name> name);
- inline Handle<Object> Call(GenericNamedPropertyDeleterCallback f,
- Handle<Name> name);
-
- inline Handle<Object> Call(IndexedPropertyGetterCallback f, uint32_t index);
- inline Handle<Object> Call(IndexedPropertyQueryCallback f, uint32_t index);
- inline Handle<Object> Call(IndexedPropertyDeleterCallback f, uint32_t index);
-
- inline Handle<Object> Call(GenericNamedPropertySetterCallback f,
- Handle<Name> name, Handle<Object> value);
+ inline Handle<JSObject> CallPropertyEnumerator(
+ Handle<InterceptorInfo> interceptor);
- inline Handle<Object> Call(GenericNamedPropertyDefinerCallback f,
- Handle<Name> name,
- const v8::PropertyDescriptor& desc);
+ inline Handle<Object> BasicCallIndexedGetterCallback(
+ IndexedPropertyGetterCallback f, uint32_t index);
+ inline Handle<Object> BasicCallNamedGetterCallback(
+ GenericNamedPropertyGetterCallback f, Handle<Name> name);
- inline Handle<Object> Call(IndexedPropertySetterCallback f, uint32_t index,
- Handle<Object> value);
-
- inline Handle<Object> Call(IndexedPropertyDefinerCallback f, uint32_t index,
- const v8::PropertyDescriptor& desc);
-
- inline void Call(AccessorNameSetterCallback f, Handle<Name> name,
- Handle<Object> value);
-
- private:
inline JSObject* holder() {
return JSObject::cast(this->begin()[T::kHolderIndex]);
}
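
The header now exposes one self-describing entry point per interceptor operation and moves the generic helpers behind private:. A before/after sketch for a caller (hypothetical, for illustration only):

// Old style: extract the callback yourself, then use the overloaded Call():
//   GenericNamedPropertyGetterCallback getter =
//       ToCData<GenericNamedPropertyGetterCallback>(interceptor->getter());
//   Handle<Object> result = args.Call(getter, name);
//
// New style: hand the whole InterceptorInfo to the named entry point and let
// PropertyCallbackArguments extract the callback and assert its invariants:
Handle<Object> result = args.CallNamedGetter(interceptor, name);
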
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index 93698c9f52..b8f03a89a8 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -705,7 +705,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
// that is undetectable but not callable, we need to update the types.h
// to allow encoding this.
CHECK(!obj->instance_call_handler()->IsUndefined(isolate));
- map->set_is_undetectable();
+ map->set_is_undetectable(true);
}
// Mark as needs_access_check if needed.
@@ -716,20 +716,20 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
// Set interceptor information in the map.
if (!obj->named_property_handler()->IsUndefined(isolate)) {
- map->set_has_named_interceptor();
+ map->set_has_named_interceptor(true);
map->set_may_have_interesting_symbols(true);
}
if (!obj->indexed_property_handler()->IsUndefined(isolate)) {
- map->set_has_indexed_interceptor();
+ map->set_has_indexed_interceptor(true);
}
// Mark instance as callable in the map.
if (!obj->instance_call_handler()->IsUndefined(isolate)) {
- map->set_is_callable();
+ map->set_is_callable(true);
map->set_is_constructor(true);
}
- if (immutable_proto) map->set_immutable_proto(true);
+ if (immutable_proto) map->set_is_immutable_proto(true);
return result;
}
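
api-natives.cc is a mechanical update to the new explicit-boolean Map flag setters (plus the immutable_proto accessor renamed to is_immutable_proto). The sketch below shows the general shape such setters imply; the class and names are made up, the real accessors live in V8's Map/bit-field machinery.

// Illustrative only: a bool-taking flag setter over a packed bit word.
class FlagsSketch {
 public:
  void set_is_undetectable(bool value) {
    if (value) {
      bits_ |= kIsUndetectableBit;
    } else {
      bits_ &= ~kIsUndetectableBit;
    }
  }
  bool is_undetectable() const { return (bits_ & kIsUndetectableBit) != 0; }

 private:
  static constexpr uint32_t kIsUndetectableBit = 1u << 0;  // one bit per flag
  uint32_t bits_ = 0;
};
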
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 856be6368b..147cc397f2 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -8,9 +8,6 @@
#ifdef V8_USE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif // V8_USE_ADDRESS_SANITIZER
-#if defined(LEAK_SANITIZER)
-#include <sanitizer/lsan_interface.h>
-#endif // defined(LEAK_SANITIZER)
#include <cmath> // For isnan.
#include <limits>
#include <vector>
@@ -84,6 +81,7 @@
#include "src/vm-state-inl.h"
#include "src/wasm/compilation-manager.h"
#include "src/wasm/streaming-decoder.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"
@@ -110,9 +108,9 @@ namespace v8 {
 * TODO(jochen): Remove calls from API methods to DO_NOT_USE macros.
 * TODO(jochen): Remove calls from API methods to DO_NOT_USE macros.
*/
-#define LOG_API(isolate, class_name, function_name) \
- i::RuntimeCallTimerScope _runtime_timer( \
- isolate, &i::RuntimeCallStats::API_##class_name##_##function_name); \
+#define LOG_API(isolate, class_name, function_name) \
+ i::RuntimeCallTimerScope _runtime_timer( \
+ isolate, i::RuntimeCallCounterId::kAPI_##class_name##_##function_name); \
LOG(isolate, ApiEntryCall("v8::" #class_name "::" #function_name))
#define ENTER_V8_DO_NOT_USE(isolate) i::VMState<v8::OTHER> __state__((isolate))
@@ -326,9 +324,9 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
if (isolate == nullptr) {
// On a background thread -> we cannot retrieve memory information from the
// Isolate. Write easy-to-recognize values on the stack.
- memset(last_few_messages, 0x0badc0de, Heap::kTraceRingBufferSize + 1);
- memset(js_stacktrace, 0x0badc0de, Heap::kStacktraceBufferSize + 1);
- memset(&heap_stats, 0xbadc0de, sizeof(heap_stats));
+ memset(last_few_messages, 0x0BADC0DE, Heap::kTraceRingBufferSize + 1);
+ memset(js_stacktrace, 0x0BADC0DE, Heap::kStacktraceBufferSize + 1);
+ memset(&heap_stats, 0xBADC0DE, sizeof(heap_stats));
// Note that the embedder's oom handler won't be called in this case. We
// just crash.
FATAL("API fatal error handler returned after process out of memory");
@@ -404,7 +402,10 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
void Utils::ReportApiFailure(const char* location, const char* message) {
i::Isolate* isolate = i::Isolate::Current();
- FatalErrorCallback callback = isolate->exception_behavior();
+ FatalErrorCallback callback = nullptr;
+ if (isolate != nullptr) {
+ callback = isolate->exception_behavior();
+ }
if (callback == nullptr) {
base::OS::PrintError("\n#\n# Fatal error in %s\n# %s\n#\n\n", location,
message);
@@ -483,23 +484,34 @@ namespace {
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
virtual void* Allocate(size_t length) {
- void* data = AllocateUninitialized(length);
- return data == nullptr ? data : memset(data, 0, length);
+#if V8_OS_AIX && _LINUX_SOURCE_COMPAT
+    // Workaround for a GCC bug on AIX
+ // See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839
+ void* data = __linux_calloc(length, 1);
+#else
+ void* data = calloc(length, 1);
+#endif
+ return data;
}
- virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
+
+ virtual void* AllocateUninitialized(size_t length) {
+#if V8_OS_AIX && _LINUX_SOURCE_COMPAT
+    // Workaround for a GCC bug on AIX
+ // See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839
+ void* data = __linux_malloc(length);
+#else
+ void* data = malloc(length);
+#endif
+ return data;
+ }
+
virtual void Free(void* data, size_t) { free(data); }
virtual void* Reserve(size_t length) {
- size_t page_size = base::OS::AllocatePageSize();
+ size_t page_size = i::AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
- void* address =
- base::OS::Allocate(base::OS::GetRandomMmapAddr(), allocated, page_size,
- base::OS::MemoryPermission::kNoAccess);
-#if defined(LEAK_SANITIZER)
- if (address != nullptr) {
- __lsan_register_root_region(address, allocated);
- }
-#endif
+ void* address = i::AllocatePages(i::GetRandomMmapAddr(), allocated,
+ page_size, PageAllocator::kNoAccess);
return address;
}
@@ -510,7 +522,9 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return Free(data, length);
}
case v8::ArrayBuffer::Allocator::AllocationMode::kReservation: {
- CHECK(base::OS::Free(data, length));
+ size_t page_size = i::AllocatePageSize();
+ size_t allocated = RoundUp(length, page_size);
+ CHECK(i::FreePages(data, allocated));
return;
}
}
@@ -521,11 +535,11 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
v8::ArrayBuffer::Allocator::Protection protection) {
DCHECK(protection == v8::ArrayBuffer::Allocator::Protection::kNoAccess ||
protection == v8::ArrayBuffer::Allocator::Protection::kReadWrite);
- base::OS::MemoryPermission permission =
+ PageAllocator::Permission permission =
(protection == v8::ArrayBuffer::Allocator::Protection::kReadWrite)
- ? base::OS::MemoryPermission::kReadWrite
- : base::OS::MemoryPermission::kNoAccess;
- CHECK(base::OS::SetPermissions(data, length, permission));
+ ? PageAllocator::kReadWrite
+ : PageAllocator::kNoAccess;
+ CHECK(i::SetPermissions(data, length, permission));
}
};
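
In the allocator's reservation path above, both Reserve() and Free(..., kReservation) round the requested length up to whole pages, which is why the free path recomputes |allocated| instead of trusting |length|. A small self-contained arithmetic check (RoundUpTo is a local stand-in for V8's RoundUp; a 4 KiB page size is assumed):

#include <cstddef>
#include <cstdio>

constexpr size_t RoundUpTo(size_t x, size_t multiple) {
  return ((x + multiple - 1) / multiple) * multiple;
}

int main() {
  size_t length = 10000;    // bytes requested by the embedder
  size_t page_size = 4096;  // what i::AllocatePageSize() returns on most hosts
  size_t allocated = RoundUpTo(length, page_size);
  // Prints 12288: three whole pages back both the reservation and the free.
  printf("%zu\n", allocated);
  return 0;
}
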
@@ -562,7 +576,6 @@ struct SnapshotCreatorData {
: isolate_(isolate),
default_context_(),
contexts_(isolate),
- templates_(isolate),
created_(false) {}
static SnapshotCreatorData* cast(void* data) {
@@ -574,7 +587,6 @@ struct SnapshotCreatorData {
Persistent<Context> default_context_;
SerializeInternalFieldsCallback default_embedder_fields_serializer_;
PersistentValueVector<Context> contexts_;
- PersistentValueVector<Template> templates_;
std::vector<SerializeInternalFieldsCallback> embedder_fields_serializers_;
bool created_;
};
@@ -634,23 +646,81 @@ size_t SnapshotCreator::AddContext(Local<Context> context,
DCHECK(!data->created_);
Isolate* isolate = data->isolate_;
CHECK_EQ(isolate, context->GetIsolate());
- size_t index = static_cast<int>(data->contexts_.Size());
+ size_t index = data->contexts_.Size();
data->contexts_.Append(context);
data->embedder_fields_serializers_.push_back(callback);
return index;
}
size_t SnapshotCreator::AddTemplate(Local<Template> template_obj) {
- DCHECK(!template_obj.IsEmpty());
+ return AddData(template_obj);
+}
+
+size_t SnapshotCreator::AddData(i::Object* object) {
+ DCHECK_NOT_NULL(object);
SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
DCHECK(!data->created_);
- DCHECK_EQ(reinterpret_cast<i::Isolate*>(data->isolate_),
- Utils::OpenHandle(*template_obj)->GetIsolate());
- size_t index = static_cast<int>(data->templates_.Size());
- data->templates_.Append(template_obj);
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(data->isolate_);
+ i::HandleScope scope(isolate);
+ i::Handle<i::Object> obj(object, isolate);
+ i::Handle<i::ArrayList> list;
+ if (!isolate->heap()->serialized_objects()->IsArrayList()) {
+ list = i::ArrayList::New(isolate, 1);
+ } else {
+ list = i::Handle<i::ArrayList>(
+ i::ArrayList::cast(isolate->heap()->serialized_objects()));
+ }
+ size_t index = static_cast<size_t>(list->Length());
+ list = i::ArrayList::Add(list, obj);
+ isolate->heap()->SetSerializedObjects(*list);
return index;
}
+size_t SnapshotCreator::AddData(Local<Context> context, i::Object* object) {
+ DCHECK_NOT_NULL(object);
+ DCHECK(!SnapshotCreatorData::cast(data_)->created_);
+ i::Handle<i::Context> ctx = Utils::OpenHandle(*context);
+ i::Isolate* isolate = ctx->GetIsolate();
+ i::HandleScope scope(isolate);
+ i::Handle<i::Object> obj(object, isolate);
+ i::Handle<i::ArrayList> list;
+ if (!ctx->serialized_objects()->IsArrayList()) {
+ list = i::ArrayList::New(isolate, 1);
+ } else {
+ list =
+ i::Handle<i::ArrayList>(i::ArrayList::cast(ctx->serialized_objects()));
+ }
+ size_t index = static_cast<size_t>(list->Length());
+ list = i::ArrayList::Add(list, obj);
+ ctx->set_serialized_objects(*list);
+ return index;
+}
+
+namespace {
+void ConvertSerializedObjectsToFixedArray(Local<Context> context) {
+ i::Handle<i::Context> ctx = Utils::OpenHandle(*context);
+ i::Isolate* isolate = ctx->GetIsolate();
+ if (!ctx->serialized_objects()->IsArrayList()) {
+ ctx->set_serialized_objects(isolate->heap()->empty_fixed_array());
+ } else {
+ i::Handle<i::ArrayList> list(i::ArrayList::cast(ctx->serialized_objects()));
+ i::Handle<i::FixedArray> elements = i::ArrayList::Elements(list);
+ ctx->set_serialized_objects(*elements);
+ }
+}
+
+void ConvertSerializedObjectsToFixedArray(i::Isolate* isolate) {
+ if (!isolate->heap()->serialized_objects()->IsArrayList()) {
+ isolate->heap()->SetSerializedObjects(isolate->heap()->empty_fixed_array());
+ } else {
+ i::Handle<i::ArrayList> list(
+ i::ArrayList::cast(isolate->heap()->serialized_objects()));
+ i::Handle<i::FixedArray> elements = i::ArrayList::Elements(list);
+ isolate->heap()->SetSerializedObjects(*elements);
+ }
+}
+} // anonymous namespace
+
StartupData SnapshotCreator::CreateBlob(
SnapshotCreator::FunctionCodeHandling function_code_handling) {
SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
@@ -661,15 +731,16 @@ StartupData SnapshotCreator::CreateBlob(
int num_additional_contexts = static_cast<int>(data->contexts_.Size());
{
- int num_templates = static_cast<int>(data->templates_.Size());
i::HandleScope scope(isolate);
- i::Handle<i::FixedArray> templates =
- isolate->factory()->NewFixedArray(num_templates, i::TENURED);
- for (int i = 0; i < num_templates; i++) {
- templates->set(i, *v8::Utils::OpenHandle(*data->templates_.Get(i)));
+ // Convert list of context-independent data to FixedArray.
+ ConvertSerializedObjectsToFixedArray(isolate);
+
+ // Convert lists of context-dependent data to FixedArray.
+ ConvertSerializedObjectsToFixedArray(
+ data->default_context_.Get(data->isolate_));
+ for (int i = 0; i < num_additional_contexts; i++) {
+ ConvertSerializedObjectsToFixedArray(data->contexts_.Get(i));
}
- isolate->heap()->SetSerializedTemplates(*templates);
- data->templates_.Clear();
// We need to store the global proxy size upfront in case we need the
// bootstrapper to create a global proxy before we deserialize the context.
@@ -695,13 +766,13 @@ StartupData SnapshotCreator::CreateBlob(
i::DisallowHeapAllocation no_gc_from_here_on;
- std::vector<i::Object*> contexts;
- contexts.reserve(num_additional_contexts);
- i::Object* default_context;
+ int num_contexts = num_additional_contexts + 1;
+ std::vector<i::Context*> contexts;
+ contexts.reserve(num_contexts);
{
i::HandleScope scope(isolate);
- default_context =
- *v8::Utils::OpenHandle(*data->default_context_.Get(data->isolate_));
+ contexts.push_back(
+ *v8::Utils::OpenHandle(*data->default_context_.Get(data->isolate_)));
data->default_context_.Reset();
for (int i = 0; i < num_additional_contexts; i++) {
i::Handle<i::Context> context =
@@ -711,6 +782,10 @@ StartupData SnapshotCreator::CreateBlob(
data->contexts_.Clear();
}
+ // Check that values referenced by global/eternal handles are accounted for.
+ i::SerializedHandleChecker handle_checker(isolate, &contexts);
+ CHECK(handle_checker.CheckGlobalAndEternalHandles());
+
// Complete in-object slack tracking for all functions.
i::HeapIterator heap_iterator(isolate->heap());
while (i::HeapObject* current_obj = heap_iterator.next()) {
@@ -724,26 +799,18 @@ StartupData SnapshotCreator::CreateBlob(
// Serialize each context with a new partial serializer.
std::vector<i::SnapshotData*> context_snapshots;
- context_snapshots.reserve(num_additional_contexts + 1);
+ context_snapshots.reserve(num_contexts);
// TODO(6593): generalize rehashing, and remove this flag.
bool can_be_rehashed = true;
- {
- // The default context is created with a handler for embedder fields which
- // determines how they are handled if encountered during serialization.
+ for (int i = 0; i < num_contexts; i++) {
+ bool is_default_context = i == 0;
i::PartialSerializer partial_serializer(
isolate, &startup_serializer,
- data->default_embedder_fields_serializer_);
- partial_serializer.Serialize(&default_context, false);
- can_be_rehashed = can_be_rehashed && partial_serializer.can_be_rehashed();
- context_snapshots.push_back(new i::SnapshotData(&partial_serializer));
- }
-
- for (int i = 0; i < num_additional_contexts; i++) {
- i::PartialSerializer partial_serializer(
- isolate, &startup_serializer, data->embedder_fields_serializers_[i]);
- partial_serializer.Serialize(&contexts[i], true);
+ is_default_context ? data->default_embedder_fields_serializer_
+ : data->embedder_fields_serializers_[i - 1]);
+ partial_serializer.Serialize(&contexts[i], !is_default_context);
can_be_rehashed = can_be_rehashed && partial_serializer.can_be_rehashed();
context_snapshots.push_back(new i::SnapshotData(&partial_serializer));
}
@@ -767,6 +834,7 @@ StartupData SnapshotCreator::CreateBlob(
delete context_snapshot;
}
data->created_ = true;
+
return result;
}
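
SnapshotCreator::AddTemplate is now a thin wrapper over AddData, so templates and other serialized data share one index space backed by a heap ArrayList that CreateBlob later flattens into a FixedArray. A hedged embedder-side sketch of the round trip (the exact public overloads are assumed from context, not quoted from v8.h):

// Hedged usage sketch; error handling omitted.
v8::SnapshotCreator creator;
v8::Isolate* isolate = creator.GetIsolate();
size_t template_index;
{
  v8::HandleScope scope(isolate);
  v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
  template_index = creator.AddTemplate(templ);  // lands in serialized_objects()
  creator.SetDefaultContext(v8::Context::New(isolate));
}
v8::StartupData blob =
    creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
// In an isolate deserialized from |blob| later on, the same index is used:
//   v8::ObjectTemplate::FromSnapshot(new_isolate, template_index)
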
@@ -911,7 +979,8 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
uint64_t virtual_memory_limit) {
set_max_semi_space_size_in_kb(
i::Heap::ComputeMaxSemiSpaceSize(physical_memory));
- set_max_old_space_size(i::Heap::ComputeMaxOldGenerationSize(physical_memory));
+ set_max_old_space_size(
+ static_cast<int>(i::Heap::ComputeMaxOldGenerationSize(physical_memory)));
set_max_zone_pool_size(i::AccountingAllocator::kMaxPoolSize);
if (virtual_memory_limit > 0 && i::kRequiresCodeRange) {
@@ -926,9 +995,7 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
void SetResourceConstraints(i::Isolate* isolate,
const ResourceConstraints& constraints) {
size_t semi_space_size = constraints.max_semi_space_size_in_kb();
- size_t old_space_size =
- static_cast<size_t>(
- static_cast<unsigned int>(constraints.max_old_space_size()));
+ int old_space_size = constraints.max_old_space_size();
size_t code_range_size = constraints.code_range_size();
size_t max_pool_size = constraints.max_zone_pool_size();
if (semi_space_size != 0 || old_space_size != 0 || code_range_size != 0) {
@@ -1409,10 +1476,10 @@ Local<FunctionTemplate> FunctionTemplate::New(
MaybeLocal<FunctionTemplate> FunctionTemplate::FromSnapshot(Isolate* isolate,
size_t index) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::FixedArray* templates = i_isolate->heap()->serialized_templates();
+ i::FixedArray* serialized_objects = i_isolate->heap()->serialized_objects();
int int_index = static_cast<int>(index);
- if (int_index < templates->length()) {
- i::Object* info = templates->get(int_index);
+ if (int_index < serialized_objects->length()) {
+ i::Object* info = serialized_objects->get(int_index);
if (info->IsFunctionTemplateInfo()) {
return Utils::ToLocal(i::Handle<i::FunctionTemplateInfo>(
i::FunctionTemplateInfo::cast(info)));
@@ -1593,10 +1660,6 @@ Local<ObjectTemplate> ObjectTemplate::New(
}
-Local<ObjectTemplate> ObjectTemplate::New() {
- return New(i::Isolate::Current(), Local<FunctionTemplate>());
-}
-
static Local<ObjectTemplate> ObjectTemplateNew(
i::Isolate* isolate, v8::Local<FunctionTemplate> constructor,
bool do_not_cache) {
@@ -1626,10 +1689,10 @@ Local<ObjectTemplate> ObjectTemplate::New(
MaybeLocal<ObjectTemplate> ObjectTemplate::FromSnapshot(Isolate* isolate,
size_t index) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::FixedArray* templates = i_isolate->heap()->serialized_templates();
+ i::FixedArray* serialized_objects = i_isolate->heap()->serialized_objects();
int int_index = static_cast<int>(index);
- if (int_index < templates->length()) {
- i::Object* info = templates->get(int_index);
+ if (int_index < serialized_objects->length()) {
+ i::Object* info = serialized_objects->get(int_index);
if (info->IsObjectTemplateInfo()) {
return Utils::ToLocal(
i::Handle<i::ObjectTemplateInfo>(i::ObjectTemplateInfo::cast(info)));
@@ -1748,11 +1811,10 @@ static i::Handle<i::InterceptorInfo> CreateInterceptorInfo(
i::Isolate* isolate, Getter getter, Setter setter, Query query,
Descriptor descriptor, Deleter remover, Enumerator enumerator,
Definer definer, Local<Value> data, PropertyHandlerFlags flags) {
- DCHECK(query == nullptr ||
- descriptor == nullptr); // Either intercept attributes or descriptor.
- DCHECK(query == nullptr ||
- definer ==
- nullptr); // Only use descriptor callback with definer callback.
+ // Either intercept attributes or descriptor.
+ DCHECK(query == nullptr || descriptor == nullptr);
+ // Only use descriptor callback with definer callback.
+ DCHECK(query == nullptr || definer == nullptr);
auto obj = i::Handle<i::InterceptorInfo>::cast(
isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE, i::TENURED));
obj->set_flags(0);
@@ -1781,6 +1843,32 @@ static i::Handle<i::InterceptorInfo> CreateInterceptorInfo(
template <typename Getter, typename Setter, typename Query, typename Descriptor,
typename Deleter, typename Enumerator, typename Definer>
+static i::Handle<i::InterceptorInfo> CreateNamedInterceptorInfo(
+ i::Isolate* isolate, Getter getter, Setter setter, Query query,
+ Descriptor descriptor, Deleter remover, Enumerator enumerator,
+ Definer definer, Local<Value> data, PropertyHandlerFlags flags) {
+ auto interceptor =
+ CreateInterceptorInfo(isolate, getter, setter, query, descriptor, remover,
+ enumerator, definer, data, flags);
+ interceptor->set_is_named(true);
+ return interceptor;
+}
+
+template <typename Getter, typename Setter, typename Query, typename Descriptor,
+ typename Deleter, typename Enumerator, typename Definer>
+static i::Handle<i::InterceptorInfo> CreateIndexedInterceptorInfo(
+ i::Isolate* isolate, Getter getter, Setter setter, Query query,
+ Descriptor descriptor, Deleter remover, Enumerator enumerator,
+ Definer definer, Local<Value> data, PropertyHandlerFlags flags) {
+ auto interceptor =
+ CreateInterceptorInfo(isolate, getter, setter, query, descriptor, remover,
+ enumerator, definer, data, flags);
+ interceptor->set_is_named(false);
+ return interceptor;
+}
+
+template <typename Getter, typename Setter, typename Query, typename Descriptor,
+ typename Deleter, typename Enumerator, typename Definer>
static void ObjectTemplateSetNamedPropertyHandler(
ObjectTemplate* templ, Getter getter, Setter setter, Query query,
Descriptor descriptor, Deleter remover, Enumerator enumerator,
@@ -1790,11 +1878,13 @@ static void ObjectTemplateSetNamedPropertyHandler(
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, templ);
EnsureNotInstantiated(cons, "ObjectTemplateSetNamedPropertyHandler");
- auto obj = CreateInterceptorInfo(isolate, getter, setter, query, descriptor,
- remover, enumerator, definer, data, flags);
+ auto obj =
+ CreateNamedInterceptorInfo(isolate, getter, setter, query, descriptor,
+ remover, enumerator, definer, data, flags);
cons->set_named_property_handler(*obj);
}
+// TODO(cbruni): deprecate.
+// TODO(cbruni): deprecate.
void ObjectTemplate::SetNamedPropertyHandler(
NamedPropertyGetterCallback getter, NamedPropertySetterCallback setter,
NamedPropertyQueryCallback query, NamedPropertyDeleterCallback remover,
@@ -1867,12 +1957,12 @@ void ObjectTemplate::SetAccessCheckCallbackAndHandler(
i::Handle<i::AccessCheckInfo>::cast(struct_info);
SET_FIELD_WRAPPED(info, set_callback, callback);
- auto named_interceptor = CreateInterceptorInfo(
+ auto named_interceptor = CreateNamedInterceptorInfo(
isolate, named_handler.getter, named_handler.setter, named_handler.query,
named_handler.descriptor, named_handler.deleter, named_handler.enumerator,
named_handler.definer, named_handler.data, named_handler.flags);
info->set_named_interceptor(*named_interceptor);
- auto indexed_interceptor = CreateInterceptorInfo(
+ auto indexed_interceptor = CreateIndexedInterceptorInfo(
isolate, indexed_handler.getter, indexed_handler.setter,
indexed_handler.query, indexed_handler.descriptor,
indexed_handler.deleter, indexed_handler.enumerator,
@@ -1895,10 +1985,10 @@ void ObjectTemplate::SetHandler(
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, this);
EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetHandler");
- auto obj = CreateInterceptorInfo(isolate, config.getter, config.setter,
- config.query, config.descriptor,
- config.deleter, config.enumerator,
- config.definer, config.data, config.flags);
+ auto obj = CreateIndexedInterceptorInfo(
+ isolate, config.getter, config.setter, config.query, config.descriptor,
+ config.deleter, config.enumerator, config.definer, config.data,
+ config.flags);
cons->set_indexed_property_handler(*obj);
}
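
With the split into CreateNamedInterceptorInfo and CreateIndexedInterceptorInfo, the is_named flag is fixed at creation time, which is what the DCHECKs in api-arguments-inl.h rely on. A hedged embedder-side sketch of the two paths (the callback bodies are hypothetical; only the signatures come from v8.h):

// Hypothetical callbacks, shown only to exercise the two SetHandler paths.
void NamedGetter(v8::Local<v8::Name> name,
                 const v8::PropertyCallbackInfo<v8::Value>& info) {}
void IndexedGetter(uint32_t index,
                   const v8::PropertyCallbackInfo<v8::Value>& info) {}

void Install(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
  // Routed through CreateNamedInterceptorInfo, so is_named() == true.
  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NamedGetter));
  // Routed through CreateIndexedInterceptorInfo, so is_named() == false.
  templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(IndexedGetter));
}
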
@@ -2239,11 +2329,6 @@ Local<Value> Module::GetModuleNamespace() {
int Module::GetIdentityHash() const { return Utils::OpenHandle(this)->hash(); }
-bool Module::Instantiate(Local<Context> context,
- Module::ResolveCallback callback) {
- return InstantiateModule(context, callback).FromMaybe(false);
-}
-
Maybe<bool> Module::InstantiateModule(Local<Context> context,
Module::ResolveCallback callback) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -2361,18 +2446,6 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundScript(
return CompileUnboundInternal(v8_isolate, source, options, no_cache_reason);
}
-Local<UnboundScript> ScriptCompiler::CompileUnbound(
- Isolate* v8_isolate, Source* source, CompileOptions options,
- NoCacheReason no_cache_reason) {
- Utils::ApiCheck(
- !source->GetResourceOptions().IsModule(),
- "v8::ScriptCompiler::CompileUnbound",
- "v8::ScriptCompiler::CompileModule must be used to compile modules");
- RETURN_TO_LOCAL_UNCHECKED(
- CompileUnboundInternal(v8_isolate, source, options, no_cache_reason),
- UnboundScript);
-}
-
MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
Source* source,
CompileOptions options,
@@ -2389,13 +2462,6 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
return result->BindToCurrentContext();
}
-Local<Script> ScriptCompiler::Compile(Isolate* v8_isolate, Source* source,
- CompileOptions options,
- NoCacheReason no_cache_reason) {
- auto context = v8_isolate->GetCurrentContext();
- RETURN_TO_LOCAL_UNCHECKED(Compile(context, source, options), Script);
-}
-
MaybeLocal<Module> ScriptCompiler::CompileModule(Isolate* isolate,
Source* source) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -2459,57 +2525,27 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
PREPARE_FOR_EXECUTION(v8_context, ScriptCompiler, CompileFunctionInContext,
Function);
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.ScriptCompiler");
- i::Handle<i::String> source_string;
- auto factory = isolate->factory();
- if (arguments_count) {
- source_string = factory->NewStringFromStaticChars("(function(");
- for (size_t i = 0; i < arguments_count; ++i) {
- IsIdentifierHelper helper;
- if (!helper.Check(*Utils::OpenHandle(*arguments[i]))) {
- return Local<Function>();
- }
- has_pending_exception =
- !factory->NewConsString(source_string,
- Utils::OpenHandle(*arguments[i]))
- .ToHandle(&source_string);
- RETURN_ON_FAILED_EXECUTION(Function);
- if (i + 1 == arguments_count) continue;
- has_pending_exception =
- !factory->NewConsString(source_string,
- factory->LookupSingleCharacterStringFromCode(
- ',')).ToHandle(&source_string);
- RETURN_ON_FAILED_EXECUTION(Function);
- }
- i::Handle<i::String> brackets;
- brackets = factory->NewStringFromStaticChars("){");
- has_pending_exception = !factory->NewConsString(source_string, brackets)
- .ToHandle(&source_string);
- RETURN_ON_FAILED_EXECUTION(Function);
- } else {
- source_string = factory->NewStringFromStaticChars("(function(){");
- }
-
- int scope_position = source_string->length();
- has_pending_exception =
- !factory->NewConsString(source_string,
- Utils::OpenHandle(*source->source_string))
- .ToHandle(&source_string);
- RETURN_ON_FAILED_EXECUTION(Function);
- // Include \n in case the source contains a line end comment.
- auto brackets = factory->NewStringFromStaticChars("\n})");
- has_pending_exception =
- !factory->NewConsString(source_string, brackets).ToHandle(&source_string);
- RETURN_ON_FAILED_EXECUTION(Function);
i::Handle<i::Context> context = Utils::OpenHandle(*v8_context);
i::Handle<i::SharedFunctionInfo> outer_info(context->closure()->shared(),
isolate);
+
+ i::Handle<i::JSFunction> fun;
+ i::Handle<i::FixedArray> arguments_list =
+ isolate->factory()->NewFixedArray(static_cast<int>(arguments_count));
+ for (int i = 0; i < static_cast<int>(arguments_count); i++) {
+ IsIdentifierHelper helper;
+ i::Handle<i::String> argument = Utils::OpenHandle(*arguments[i]);
+ if (!helper.Check(*argument)) return Local<Function>();
+ arguments_list->set(i, *argument);
+ }
+
for (size_t i = 0; i < context_extension_count; ++i) {
i::Handle<i::JSReceiver> extension =
Utils::OpenHandle(*context_extensions[i]);
if (!extension->IsJSObject()) return Local<Function>();
i::Handle<i::JSFunction> closure(context->closure(), isolate);
- context = factory->NewWithContext(
+ context = isolate->factory()->NewWithContext(
closure, context,
i::ScopeInfo::CreateForWithScope(
isolate, context->IsNativeContext()
@@ -2519,8 +2555,6 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
}
i::Handle<i::Object> name_obj;
- int eval_scope_position = 0;
- int eval_position = i::kNoSourcePosition;
int line_offset = 0;
int column_offset = 0;
if (!source->resource_name.IsEmpty()) {
@@ -2532,27 +2566,15 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
if (!source->resource_column_offset.IsEmpty()) {
column_offset = static_cast<int>(source->resource_column_offset->Value());
}
- i::Handle<i::JSFunction> fun;
- has_pending_exception =
- !i::Compiler::GetFunctionFromEval(
- source_string, outer_info, context, i::LanguageMode::kSloppy,
- i::ONLY_SINGLE_FUNCTION_LITERAL, i::kNoSourcePosition,
- eval_scope_position, eval_position, line_offset,
- column_offset - scope_position, name_obj, source->resource_options)
- .ToHandle(&fun);
- if (has_pending_exception) {
- isolate->ReportPendingMessages();
- }
- RETURN_ON_FAILED_EXECUTION(Function);
- i::Handle<i::Object> result;
+ i::Handle<i::JSFunction> result;
has_pending_exception =
- !i::Execution::Call(isolate, fun,
- Utils::OpenHandle(*v8_context->Global()), 0,
- nullptr).ToHandle(&result);
+ !i::Compiler::GetWrappedFunction(
+ Utils::OpenHandle(*source->source_string), arguments_list, context,
+ line_offset, column_offset, name_obj, source->resource_options)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Function);
- RETURN_ESCAPED(
- Utils::CallableToLocal(i::Handle<i::JSFunction>::cast(result)));
+ RETURN_ESCAPED(Utils::CallableToLocal(result));
}
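
CompileFunctionInContext no longer concatenates a "(function(...){...})" wrapper string and evaluates it; the argument names are passed as a FixedArray straight to Compiler::GetWrappedFunction. A hedged sketch of the unchanged public entry point (|isolate| and |context| assumed to be in scope; error handling omitted):

v8::ScriptCompiler::Source source(
    v8::String::NewFromUtf8(isolate, "return a + b;",
                            v8::NewStringType::kNormal)
        .ToLocalChecked());
v8::Local<v8::String> args[] = {
    v8::String::NewFromUtf8(isolate, "a", v8::NewStringType::kNormal)
        .ToLocalChecked(),
    v8::String::NewFromUtf8(isolate, "b", v8::NewStringType::kNormal)
        .ToLocalChecked()};
// Compiles a function equivalent to: function(a, b) { return a + b; }
v8::MaybeLocal<v8::Function> fn = v8::ScriptCompiler::CompileFunctionInContext(
    context, &source, 2, args, 0, nullptr);
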
@@ -2587,6 +2609,9 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
i::StreamedSource* source = v8_source->impl();
i::Handle<i::String> str = Utils::OpenHandle(*(full_source_string));
i::Handle<i::Script> script = isolate->factory()->NewScript(str);
+ if (isolate->NeedsSourcePositionsForProfiling()) {
+ i::Script::InitLineEnds(script);
+ }
if (!origin.ResourceName().IsEmpty()) {
script->set_name(*Utils::OpenHandle(*(origin.ResourceName())));
}
@@ -2643,23 +2668,49 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
RETURN_ESCAPED(bound);
}
-
-Local<Script> ScriptCompiler::Compile(Isolate* v8_isolate,
- StreamedSource* v8_source,
- Local<String> full_source_string,
- const ScriptOrigin& origin) {
- auto context = v8_isolate->GetCurrentContext();
- RETURN_TO_LOCAL_UNCHECKED(
- Compile(context, v8_source, full_source_string, origin), Script);
-}
-
-
uint32_t ScriptCompiler::CachedDataVersionTag() {
return static_cast<uint32_t>(base::hash_combine(
internal::Version::Hash(), internal::FlagList::Hash(),
static_cast<uint32_t>(internal::CpuFeatures::SupportedFeatures())));
}
+ScriptCompiler::CachedData* ScriptCompiler::CreateCodeCache(
+ Local<UnboundScript> unbound_script, Local<String> source) {
+ i::Handle<i::SharedFunctionInfo> shared =
+ i::Handle<i::SharedFunctionInfo>::cast(
+ Utils::OpenHandle(*unbound_script));
+ i::Isolate* isolate = shared->GetIsolate();
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
+ base::ElapsedTimer timer;
+ if (i::FLAG_profile_deserialization) {
+ timer.Start();
+ }
+ i::HistogramTimerScope histogram_timer(
+ isolate->counters()->compile_serialize());
+ i::RuntimeCallTimerScope runtimeTimer(
+ isolate, i::RuntimeCallCounterId::kCompileSerialize);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileSerialize");
+
+ DCHECK(shared->is_toplevel());
+ i::Handle<i::Script> script(i::Script::cast(shared->script()));
+ // TODO(7110): Enable serialization of Asm modules once the AsmWasmData is
+ // context independent.
+ if (script->ContainsAsmModule()) return nullptr;
+ if (isolate->debug()->is_loaded()) return nullptr;
+
+ i::ScriptData* script_data =
+ i::CodeSerializer::Serialize(isolate, shared, Utils::OpenHandle(*source));
+ CachedData* result = new CachedData(
+ script_data->data(), script_data->length(), CachedData::BufferOwned);
+ script_data->ReleaseDataOwnership();
+ delete script_data;
+
+ if (i::FLAG_profile_deserialization) {
+ i::PrintF("[Serializing took %0.3f ms]\n",
+ timer.Elapsed().InMillisecondsF());
+ }
+ return result;
+}
MaybeLocal<Script> Script::Compile(Local<Context> context, Local<String> source,
ScriptOrigin* origin) {
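
ScriptCompiler::CreateCodeCache is the new producer side of the code-cache API; note that it can return nullptr (asm.js modules, debugger loaded). A hedged usage sketch (|isolate| and a Local<String> |source_text| assumed to be in scope; default compile options):

v8::ScriptCompiler::Source source(source_text);
v8::Local<v8::UnboundScript> unbound =
    v8::ScriptCompiler::CompileUnboundScript(isolate, &source)
        .ToLocalChecked();
v8::ScriptCompiler::CachedData* cache =
    v8::ScriptCompiler::CreateCodeCache(unbound, source_text);
// |cache| owns its buffer (BufferOwned) and can be persisted, then handed
// back via ScriptCompiler::Source together with kConsumeCodeCache on a
// later compile.
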
@@ -2691,24 +2742,6 @@ Local<Script> Script::Compile(v8::Local<String> source,
// --- E x c e p t i o n s ---
-
-v8::TryCatch::TryCatch()
- : isolate_(i::Isolate::Current()),
- next_(isolate_->try_catch_handler()),
- is_verbose_(false),
- can_continue_(true),
- capture_message_(true),
- rethrow_(false),
- has_terminated_(false) {
- ResetInternal();
- // Special handling for simulators which have a separate JS stack.
- js_stack_comparable_address_ =
- reinterpret_cast<void*>(i::SimulatorStack::RegisterCTryCatch(
- isolate_, i::GetCurrentStackPosition()));
- isolate_->RegisterTryCatchHandler(this);
-}
-
-
v8::TryCatch::TryCatch(v8::Isolate* isolate)
: isolate_(reinterpret_cast<i::Isolate*>(isolate)),
next_(isolate_->try_catch_handler()),
@@ -2963,13 +2996,6 @@ Maybe<int> Message::GetEndColumn(Local<Context> context) const {
}
-int Message::GetEndColumn() const {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- const int default_value = kNoColumnInfo;
- return GetEndColumn(context).FromMaybe(default_value);
-}
-
-
bool Message::IsSharedCrossOrigin() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
@@ -3030,65 +3056,6 @@ int StackTrace::GetFrameCount() const {
return Utils::OpenHandle(this)->length();
}
-namespace {
-i::Handle<i::JSObject> NewFrameObject(i::Isolate* isolate,
- i::Handle<i::StackFrameInfo> frame) {
- i::Handle<i::JSObject> frame_obj =
- isolate->factory()->NewJSObject(isolate->object_function());
- i::JSObject::AddProperty(
- frame_obj, handle(isolate->heap()->line_string()),
- handle(i::Smi::FromInt(frame->line_number() + 1), isolate), i::NONE);
- i::JSObject::AddProperty(
- frame_obj, handle(isolate->heap()->column_string()),
- handle(i::Smi::FromInt(frame->column_number() + 1), isolate), i::NONE);
- i::JSObject::AddProperty(frame_obj,
- isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("scriptId")),
- handle(i::Smi::FromInt(frame->script_id()), isolate),
- i::NONE);
- i::JSObject::AddProperty(frame_obj,
- isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("scriptName")),
- handle(frame->script_name(), isolate), i::NONE);
- i::JSObject::AddProperty(frame_obj,
- isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("scriptNameOrSourceURL")),
- handle(frame->script_name_or_source_url(), isolate),
- i::NONE);
- i::JSObject::AddProperty(frame_obj,
- isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("functionName")),
- handle(frame->function_name(), isolate), i::NONE);
- i::JSObject::AddProperty(frame_obj,
- isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("isEval")),
- isolate->factory()->ToBoolean(frame->is_eval()),
- i::NONE);
- i::JSObject::AddProperty(
- frame_obj,
- isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("isConstructor")),
- isolate->factory()->ToBoolean(frame->is_constructor()), i::NONE);
- return frame_obj;
-}
-} // namespace
-
-Local<Array> StackTrace::AsArray() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- i::Handle<i::FixedArray> self = Utils::OpenHandle(this);
- int frame_count = self->length();
- i::Handle<i::FixedArray> frames =
- isolate->factory()->NewFixedArray(frame_count);
- for (int i = 0; i < frame_count; ++i) {
- auto obj = handle(self->get(i), isolate);
- auto frame = i::Handle<i::StackFrameInfo>::cast(obj);
- i::Handle<i::JSObject> frame_obj = NewFrameObject(isolate, frame);
- frames->set(i, *frame_obj);
- }
- return Utils::ToLocal(isolate->factory()->NewJSArrayWithElements(
- frames, i::PACKED_ELEMENTS, frame_count));
-}
-
Local<StackTrace> StackTrace::CurrentStackTrace(
Isolate* isolate,
@@ -3193,10 +3160,6 @@ MaybeLocal<Value> JSON::Parse(Local<Context> context,
RETURN_ESCAPED(result);
}
-Local<Value> JSON::Parse(Local<String> json_string) {
- RETURN_TO_LOCAL_UNCHECKED(Parse(Local<Context>(), json_string), Value);
-}
-
MaybeLocal<String> JSON::Stringify(Local<Context> context,
Local<Value> json_object,
Local<String> gap) {
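
Like the other deletions in this file, this removes a long-deprecated context-free overload. Migration sketch for JSON::Parse (|context| and |json_string| assumed to be in scope):

// Removed:  v8::Local<v8::Value> v = v8::JSON::Parse(json_string);
// Instead, the context-taking, Maybe-returning overload is used:
v8::MaybeLocal<v8::Value> v = v8::JSON::Parse(context, json_string);
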
@@ -3707,12 +3670,6 @@ MaybeLocal<String> Value::ToDetailString(Local<Context> context) const {
}
-Local<String> Value::ToDetailString(Isolate* isolate) const {
- RETURN_TO_LOCAL_UNCHECKED(ToDetailString(isolate->GetCurrentContext()),
- String);
-}
-
-
MaybeLocal<Object> Value::ToObject(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsJSReceiver()) return ToApiHandle<Object>(obj);
@@ -3806,11 +3763,6 @@ MaybeLocal<Uint32> Value::ToUint32(Local<Context> context) const {
}
-Local<Uint32> Value::ToUint32(Isolate* isolate) const {
- RETURN_TO_LOCAL_UNCHECKED(ToUint32(isolate->GetCurrentContext()), Uint32);
-}
-
-
void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
Utils::ApiCheck(isolate != nullptr && !isolate->IsDead(),
@@ -3866,6 +3818,15 @@ void v8::Symbol::CheckCast(v8::Value* that) {
}
+void v8::Private::CheckCast(v8::Data* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsSymbol() &&
+ i::Handle<i::Symbol>::cast(obj)->is_private(),
+ "v8::Private::Cast",
+ "Could not convert to private");
+}
+
+
void v8::Number::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsNumber(),
@@ -4180,17 +4141,6 @@ MaybeLocal<Uint32> Value::ToArrayIndex(Local<Context> context) const {
}
-Local<Uint32> Value::ToArrayIndex() const {
- auto self = Utils::OpenHandle(this);
- if (self->IsSmi()) {
- if (i::Smi::ToInt(*self) >= 0) return Utils::Uint32ToLocal(self);
- return Local<Uint32>();
- }
- auto context = ContextFromHeapObject(self);
- RETURN_TO_LOCAL_UNCHECKED(ToArrayIndex(context), Uint32);
-}
-
-
Maybe<bool> Value::Equals(Local<Context> context, Local<Value> that) const {
auto self = Utils::OpenHandle(this);
auto other = Utils::OpenHandle(*that);
@@ -4469,39 +4419,6 @@ Maybe<bool> v8::Object::DefineProperty(v8::Local<v8::Context> context,
return success;
}
-MUST_USE_RESULT
-static i::MaybeHandle<i::Object> DefineObjectProperty(
- i::Handle<i::JSObject> js_object, i::Handle<i::Object> key,
- i::Handle<i::Object> value, i::PropertyAttributes attrs) {
- i::Isolate* isolate = js_object->GetIsolate();
- bool success = false;
- i::LookupIterator it = i::LookupIterator::PropertyOrElement(
- isolate, js_object, key, &success, i::LookupIterator::OWN);
- if (!success) return i::MaybeHandle<i::Object>();
-
- return i::JSObject::DefineOwnPropertyIgnoreAttributes(
- &it, value, attrs, i::JSObject::FORCE_FIELD);
-}
-
-
-Maybe<bool> v8::Object::ForceSet(v8::Local<v8::Context> context,
- v8::Local<Value> key, v8::Local<Value> value,
- v8::PropertyAttribute attribs) {
- auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
- ENTER_V8_NO_SCRIPT(isolate, context, Object, ForceSet, Nothing<bool>(),
- i::HandleScope);
- auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
- auto key_obj = Utils::OpenHandle(*key);
- auto value_obj = Utils::OpenHandle(*value);
- has_pending_exception =
- DefineObjectProperty(self, key_obj, value_obj,
- static_cast<i::PropertyAttributes>(attribs))
- .is_null();
- RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return Just(true);
-}
-
-
Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key,
Local<Value> value) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -4595,12 +4512,6 @@ Maybe<PropertyAttribute> v8::Object::GetPropertyAttributes(
}
-PropertyAttribute v8::Object::GetPropertyAttributes(v8::Local<Value> key) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- return GetPropertyAttributes(context, key)
- .FromMaybe(static_cast<PropertyAttribute>(i::NONE));
-}
-
MaybeLocal<Value> v8::Object::GetOwnPropertyDescriptor(Local<Context> context,
Local<Name> key) {
PREPARE_FOR_EXECUTION(context, Object, GetOwnPropertyDescriptor, Value);
@@ -4618,11 +4529,6 @@ MaybeLocal<Value> v8::Object::GetOwnPropertyDescriptor(Local<Context> context,
RETURN_ESCAPED(Utils::ToLocal(desc.ToObject(isolate)));
}
-Local<Value> v8::Object::GetOwnPropertyDescriptor(Local<Name> key) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(GetOwnPropertyDescriptor(context, key), Value);
-}
-
Local<Value> v8::Object::GetPrototype() {
auto isolate = Utils::OpenHandle(this)->GetIsolate();
@@ -4650,11 +4556,6 @@ Maybe<bool> v8::Object::SetPrototype(Local<Context> context,
}
-bool v8::Object::SetPrototype(Local<Value> value) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- return SetPrototype(context, value).FromMaybe(false);
-}
-
Local<Object> v8::Object::FindInstanceInPrototypeChain(
v8::Local<FunctionTemplate> tmpl) {
auto self = Utils::OpenHandle(this);
@@ -4733,12 +4634,6 @@ MaybeLocal<String> v8::Object::ObjectProtoToString(Local<Context> context) {
}
-Local<String> v8::Object::ObjectProtoToString() {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(ObjectProtoToString(context), String);
-}
-
-
Local<String> v8::Object::GetConstructorName() {
auto self = Utils::OpenHandle(this);
i::Handle<i::String> name = i::JSReceiver::GetConstructorName(self);
@@ -4850,12 +4745,6 @@ Maybe<bool> v8::Object::Delete(Local<Context> context, uint32_t index) {
}
-bool v8::Object::Delete(uint32_t index) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- return Delete(context, index).FromMaybe(false);
-}
-
-
Maybe<bool> v8::Object::Has(Local<Context> context, uint32_t index) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8(isolate, context, Object, Has, Nothing<bool>(), i::HandleScope);
@@ -4867,11 +4756,6 @@ Maybe<bool> v8::Object::Has(Local<Context> context, uint32_t index) {
}
-bool v8::Object::Has(uint32_t index) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- return Has(context, index).FromMaybe(false);
-}
-
template <typename Getter, typename Setter, typename Data>
static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
Local<Name> name, Getter getter,
@@ -4918,27 +4802,6 @@ Maybe<bool> Object::SetAccessor(Local<Context> context, Local<Name> name,
}
-bool Object::SetAccessor(Local<String> name, AccessorGetterCallback getter,
- AccessorSetterCallback setter, v8::Local<Value> data,
- AccessControl settings, PropertyAttribute attributes) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- return ObjectSetAccessor(context, this, name, getter, setter, data, settings,
- attributes, i::FLAG_disable_old_api_accessors)
- .FromMaybe(false);
-}
-
-
-bool Object::SetAccessor(Local<Name> name, AccessorNameGetterCallback getter,
- AccessorNameSetterCallback setter,
- v8::Local<Value> data, AccessControl settings,
- PropertyAttribute attributes) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- return ObjectSetAccessor(context, this, name, getter, setter, data, settings,
- attributes, i::FLAG_disable_old_api_accessors)
- .FromMaybe(false);
-}
-
-
void Object::SetAccessorProperty(Local<Name> name, Local<Function> getter,
Local<Function> setter,
PropertyAttribute attribute,
@@ -4992,12 +4855,6 @@ Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context, uint32_t index) {
return result;
}
-bool v8::Object::HasOwnProperty(Local<String> key) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- return HasOwnProperty(context, key).FromMaybe(false);
-}
-
-
Maybe<bool> v8::Object::HasRealNamedProperty(Local<Context> context,
Local<Name> key) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -5099,14 +4956,6 @@ MaybeLocal<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
}
-Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
- Local<String> key) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(GetRealNamedPropertyInPrototypeChain(context, key),
- Value);
-}
-
-
Maybe<PropertyAttribute>
v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(
Local<Context> context, Local<Name> key) {
@@ -5133,13 +4982,6 @@ v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(
}
-Maybe<PropertyAttribute>
-v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(Local<String> key) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- return GetRealNamedPropertyAttributesInPrototypeChain(context, key);
-}
-
-
MaybeLocal<Value> v8::Object::GetRealNamedProperty(Local<Context> context,
Local<Name> key) {
PREPARE_FOR_EXECUTION(context, Object, GetRealNamedProperty, Value);
@@ -5156,12 +4998,6 @@ MaybeLocal<Value> v8::Object::GetRealNamedProperty(Local<Context> context,
}
-Local<Value> v8::Object::GetRealNamedProperty(Local<String> key) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(GetRealNamedProperty(context, key), Value);
-}
-
-
Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
Local<Context> context, Local<Name> key) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -5183,13 +5019,6 @@ Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
}
-Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
- Local<String> key) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- return GetRealNamedPropertyAttributes(context, key);
-}
-
-
Local<v8::Object> v8::Object::Clone() {
auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
auto isolate = self->GetIsolate();
@@ -5245,15 +5074,6 @@ MaybeLocal<Value> Object::CallAsFunction(Local<Context> context,
}
-Local<v8::Value> Object::CallAsFunction(v8::Local<v8::Value> recv, int argc,
- v8::Local<v8::Value> argv[]) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- Local<Value>* argv_cast = reinterpret_cast<Local<Value>*>(argv);
- RETURN_TO_LOCAL_UNCHECKED(CallAsFunction(context, recv, argc, argv_cast),
- Value);
-}
-
-
MaybeLocal<Value> Object::CallAsConstructor(Local<Context> context, int argc,
Local<Value> argv[]) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -5272,13 +5092,6 @@ MaybeLocal<Value> Object::CallAsConstructor(Local<Context> context, int argc,
}
-Local<v8::Value> Object::CallAsConstructor(int argc,
- v8::Local<v8::Value> argv[]) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- Local<Value>* argv_cast = reinterpret_cast<Local<Value>*>(argv);
- RETURN_TO_LOCAL_UNCHECKED(CallAsConstructor(context, argc, argv_cast), Value);
-}
-
MaybeLocal<Function> Function::New(Local<Context> context,
FunctionCallback callback, Local<Value> data,
int length, ConstructorBehavior behavior) {
@@ -5300,12 +5113,6 @@ Local<Function> Function::New(Isolate* v8_isolate, FunctionCallback callback,
}
-Local<v8::Object> Function::NewInstance() const {
- return NewInstance(Isolate::GetCurrent()->GetCurrentContext(), 0, nullptr)
- .FromMaybe(Local<Object>());
-}
-
-
MaybeLocal<Object> Function::NewInstance(Local<Context> context, int argc,
v8::Local<v8::Value> argv[]) const {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -5324,13 +5131,6 @@ MaybeLocal<Object> Function::NewInstance(Local<Context> context, int argc,
}
-Local<v8::Object> Function::NewInstance(int argc,
- v8::Local<v8::Value> argv[]) const {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(NewInstance(context, argc, argv), Object);
-}
-
-
MaybeLocal<v8::Value> Function::Call(Local<Context> context,
v8::Local<v8::Value> recv, int argc,
v8::Local<v8::Value> argv[]) {
@@ -5340,6 +5140,8 @@ MaybeLocal<v8::Value> Function::Call(Local<Context> context,
InternalEscapableScope);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
auto self = Utils::OpenHandle(this);
+ Utils::ApiCheck(!self.is_null(), "v8::Function::Call",
+ "Function to be called is a null pointer");
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
@@ -5474,16 +5276,6 @@ int Function::GetScriptColumnNumber() const {
}
-bool Function::IsBuiltin() const {
- auto self = Utils::OpenHandle(this);
- if (!self->IsJSFunction()) {
- return false;
- }
- auto func = i::Handle<i::JSFunction>::cast(self);
- return !func->shared()->IsUserJavaScript();
-}
-
-
int Function::ScriptId() const {
auto self = Utils::OpenHandle(this);
if (!self->IsJSFunction()) {
@@ -6397,7 +6189,9 @@ HeapStatistics::HeapStatistics()
heap_size_limit_(0),
malloced_memory_(0),
peak_malloced_memory_(0),
- does_zap_garbage_(0) {}
+ does_zap_garbage_(0),
+ number_of_native_contexts_(0),
+ number_of_detached_contexts_(0) {}
HeapSpaceStatistics::HeapSpaceStatistics(): space_name_(0),
space_size_(0),
@@ -6415,10 +6209,6 @@ HeapObjectStatistics::HeapObjectStatistics()
HeapCodeStatistics::HeapCodeStatistics()
: code_and_metadata_size_(0), bytecode_and_metadata_size_(0) {}
-bool v8::V8::InitializeICU(const char* icu_data_file) {
- return i::InitializeICU(icu_data_file);
-}
-
bool v8::V8::InitializeICUDefaultLocation(const char* exec_path,
const char* icu_data_file) {
return i::InitializeICUDefaultLocation(exec_path, icu_data_file);
@@ -6724,7 +6514,31 @@ void Context::SetErrorMessageForCodeGenerationFromStrings(Local<String> error) {
context->set_error_message_for_code_gen_from_strings(*error_handle);
}
-size_t Context::EstimatedSize() { return 0; }
+namespace {
+i::Object** GetSerializedDataFromFixedArray(i::Isolate* isolate,
+ i::FixedArray* list, size_t index) {
+ if (index < static_cast<size_t>(list->length())) {
+ int int_index = static_cast<int>(index);
+ i::Object* object = list->get(int_index);
+ if (!object->IsTheHole(isolate)) {
+ list->set_the_hole(isolate, int_index);
+ // Shrink the list so that the last element is not the hole.
+ int last = list->length() - 1;
+ while (last >= 0 && list->is_the_hole(isolate, last)) last--;
+ list->Shrink(last + 1);
+ return i::Handle<i::Object>(object, isolate).location();
+ }
+ }
+ return nullptr;
+}
+} // anonymous namespace
+
+i::Object** Context::GetDataFromSnapshotOnce(size_t index) {
+ auto context = Utils::OpenHandle(this);
+ i::Isolate* i_isolate = context->GetIsolate();
+ i::FixedArray* list = i::FixedArray::cast(context->serialized_objects());
+ return GetSerializedDataFromFixedArray(i_isolate, list, index);
+}
MaybeLocal<v8::Object> ObjectTemplate::NewInstance(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, ObjectTemplate, NewInstance, Object);
@@ -6742,6 +6556,29 @@ Local<v8::Object> ObjectTemplate::NewInstance() {
RETURN_TO_LOCAL_UNCHECKED(NewInstance(context), Object);
}
+void v8::ObjectTemplate::CheckCast(Data* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsObjectTemplateInfo(), "v8::ObjectTemplate::Cast",
+ "Could not convert to object template");
+}
+
+void v8::FunctionTemplate::CheckCast(Data* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsFunctionTemplateInfo(), "v8::FunctionTemplate::Cast",
+ "Could not convert to function template");
+}
+
+void v8::Signature::CheckCast(Data* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsFunctionTemplateInfo(), "v8::Signature::Cast",
+ "Could not convert to signature");
+}
+
+void v8::AccessorSignature::CheckCast(Data* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsFunctionTemplateInfo(), "v8::AccessorSignature::Cast",
+ "Could not convert to accessor signature");
+}
MaybeLocal<v8::Function> FunctionTemplate::GetFunction(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, FunctionTemplate, GetFunction, Function);
@@ -6915,16 +6752,6 @@ MaybeLocal<String> String::NewFromUtf8(Isolate* isolate, const char* data,
}
-Local<String> String::NewFromOneByte(Isolate* isolate,
- const uint8_t* data,
- NewStringType type,
- int length) {
- NEW_STRING(isolate, String, NewFromOneByte, uint8_t, data,
- static_cast<v8::NewStringType>(type), length);
- RETURN_TO_LOCAL_UNCHECKED(result, String);
-}
-
-
MaybeLocal<String> String::NewFromOneByte(Isolate* isolate, const uint8_t* data,
v8::NewStringType type, int length) {
NEW_STRING(isolate, String, NewFromOneByte, uint8_t, data, type, length);
@@ -6991,12 +6818,6 @@ MaybeLocal<String> v8::String::NewExternalTwoByte(
}
-Local<String> v8::String::NewExternal(
- Isolate* isolate, v8::String::ExternalStringResource* resource) {
- RETURN_TO_LOCAL_UNCHECKED(NewExternalTwoByte(isolate, resource), String);
-}
-
-
MaybeLocal<String> v8::String::NewExternalOneByte(
Isolate* isolate, v8::String::ExternalOneByteStringResource* resource) {
CHECK(resource && resource->data());
@@ -7133,11 +6954,6 @@ Local<v8::Value> v8::BooleanObject::New(Isolate* isolate, bool value) {
}
-Local<v8::Value> v8::BooleanObject::New(bool value) {
- return New(Isolate::GetCurrent(), value);
-}
-
-
bool v8::BooleanObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -7306,27 +7122,6 @@ uint32_t v8::Array::Length() const {
}
-MaybeLocal<Object> Array::CloneElementAt(Local<Context> context,
- uint32_t index) {
- PREPARE_FOR_EXECUTION(context, Array, CloneElementAt, Object);
- auto self = Utils::OpenHandle(this);
- if (!self->HasObjectElements()) return Local<Object>();
- i::FixedArray* elms = i::FixedArray::cast(self->elements());
- i::Object* paragon = elms->get(index);
- if (!paragon->IsJSObject()) return Local<Object>();
- i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
- Local<Object> result;
- has_pending_exception =
- !ToLocal<Object>(isolate->factory()->CopyJSObject(paragon_handle),
- &result);
- RETURN_ON_FAILED_EXECUTION(Object);
- RETURN_ESCAPED(result);
-}
-
-
-Local<Object> Array::CloneElementAt(uint32_t index) { return Local<Object>(); }
-
-
Local<v8::Map> v8::Map::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, Map, New);
@@ -7643,12 +7438,6 @@ MaybeLocal<Promise> Promise::Catch(Local<Context> context,
}
-Local<Promise> Promise::Catch(Local<Function> handler) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(Catch(context, handler), Promise);
-}
-
-
MaybeLocal<Promise> Promise::Then(Local<Context> context,
Local<Function> handler) {
PREPARE_FOR_EXECUTION(context, Promise, Then, Promise);
@@ -7663,12 +7452,6 @@ MaybeLocal<Promise> Promise::Then(Local<Context> context,
}
-Local<Promise> Promise::Then(Local<Function> handler) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(Then(context, handler), Promise);
-}
-
-
bool Promise::HasHandler() {
i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
i::Isolate* isolate = promise->GetIsolate();
@@ -7700,9 +7483,9 @@ Promise::PromiseState Promise::State() {
return static_cast<PromiseState>(js_promise->status());
}
-Local<Object> Proxy::GetTarget() {
+Local<Value> Proxy::GetTarget() {
i::Handle<i::JSProxy> self = Utils::OpenHandle(this);
- i::Handle<i::JSReceiver> target(self->target());
+ i::Handle<i::Object> target(self->target(), self->GetIsolate());
return Utils::ToLocal(target);
}
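Proxy::GetTarget is widened here from Local<Object> to Local<Value>, presumably so the null target slot of a revoked proxy can be handed back directly. A caller-side sketch of the extra check this implies (illustrative only, not part of this patch):

void InspectProxyTarget(v8::Local<v8::Proxy> proxy) {
  // With the new signature the result is a generic Value; a revoked proxy
  // yields null here, so re-check before treating it as an object.
  v8::Local<v8::Value> target = proxy->GetTarget();
  if (target->IsObject()) {
    v8::Local<v8::Object> obj = target.As<v8::Object>();
    // ... inspect obj ...
  }
}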
@@ -7742,8 +7525,8 @@ Local<String> WasmCompiledModule::GetWasmWireBytes() {
i::Handle<i::WasmModuleObject> obj =
i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this));
i::Handle<i::WasmCompiledModule> compiled_part =
- i::handle(i::WasmCompiledModule::cast(obj->compiled_module()));
- i::Handle<i::String> wire_bytes(compiled_part->module_bytes());
+ i::handle(obj->compiled_module());
+ i::Handle<i::String> wire_bytes(compiled_part->shared()->module_bytes());
return Local<String>::Cast(Utils::ToLocal(wire_bytes));
}
@@ -7782,20 +7565,7 @@ WasmCompiledModule::SerializedModule WasmCompiledModule::Serialize() {
i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this));
i::Handle<i::WasmCompiledModule> compiled_part =
i::handle(i::WasmCompiledModule::cast(obj->compiled_module()));
- if (i::FLAG_wasm_jit_to_native) {
- i::Isolate* isolate = obj->GetIsolate();
-
- return i::wasm::NativeModuleSerializer::SerializeWholeModule(isolate,
- compiled_part);
- } else {
- std::unique_ptr<i::ScriptData> script_data =
- i::WasmCompiledModuleSerializer::SerializeWasmModule(obj->GetIsolate(),
- compiled_part);
- script_data->ReleaseDataOwnership();
-
- size_t size = static_cast<size_t>(script_data->length());
- return {std::unique_ptr<const uint8_t[]>(script_data->data()), size};
- }
+ return i::wasm::SerializeNativeModule(obj->GetIsolate(), compiled_part);
}
MaybeLocal<WasmCompiledModule> WasmCompiledModule::Deserialize(
@@ -7803,25 +7573,14 @@ MaybeLocal<WasmCompiledModule> WasmCompiledModule::Deserialize(
const WasmCompiledModule::CallerOwnedBuffer& serialized_module,
const WasmCompiledModule::CallerOwnedBuffer& wire_bytes) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::MaybeHandle<i::FixedArray> maybe_compiled_part;
- if (i::FLAG_wasm_jit_to_native) {
- maybe_compiled_part =
- i::wasm::NativeModuleDeserializer::DeserializeFullBuffer(
- i_isolate, {serialized_module.first, serialized_module.second},
- {wire_bytes.first, wire_bytes.second});
- } else {
- int size = static_cast<int>(serialized_module.second);
- i::ScriptData sc(serialized_module.first, size);
- maybe_compiled_part =
- i::WasmCompiledModuleSerializer::DeserializeWasmModule(
- i_isolate, &sc, {wire_bytes.first, wire_bytes.second});
- }
- i::Handle<i::FixedArray> compiled_part;
- if (!maybe_compiled_part.ToHandle(&compiled_part)) {
+ i::MaybeHandle<i::WasmCompiledModule> maybe_compiled_module =
+ i::wasm::DeserializeNativeModule(
+ i_isolate, {serialized_module.first, serialized_module.second},
+ {wire_bytes.first, wire_bytes.second});
+ i::Handle<i::WasmCompiledModule> compiled_module;
+ if (!maybe_compiled_module.ToHandle(&compiled_module)) {
return MaybeLocal<WasmCompiledModule>();
}
- i::Handle<i::WasmCompiledModule> compiled_module =
- handle(i::WasmCompiledModule::cast(*compiled_part));
return Local<WasmCompiledModule>::Cast(
Utils::ToLocal(i::Handle<i::JSObject>::cast(
i::WasmModuleObject::New(i_isolate, compiled_module))));
@@ -7866,8 +7625,10 @@ WasmModuleObjectBuilderStreaming::WasmModuleObjectBuilderStreaming(
i::Handle<i::JSPromise> promise = Utils::OpenHandle(*GetPromise());
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
streaming_decoder_ =
- i_isolate->wasm_compilation_manager()->StartStreamingCompilation(
- i_isolate, handle(i_isolate->context()), promise);
+ i_isolate->wasm_engine()
+ ->compilation_manager()
+ ->StartStreamingCompilation(i_isolate, handle(i_isolate->context()),
+ promise);
}
}
@@ -7907,7 +7668,8 @@ void WasmModuleObjectBuilderStreaming::Finish() {
// will be resolved when we move to true streaming compilation.
i::wasm::AsyncCompile(reinterpret_cast<i::Isolate*>(isolate_),
Utils::OpenHandle(*promise_.Get(isolate_)),
- {wire_bytes.get(), wire_bytes.get() + total_size_});
+ {wire_bytes.get(), wire_bytes.get() + total_size_},
+ false);
}
void WasmModuleObjectBuilderStreaming::Abort(Local<Value> exception) {
@@ -7917,6 +7679,12 @@ void WasmModuleObjectBuilderStreaming::Abort(Local<Value> exception) {
if (promise->State() != v8::Promise::kPending) return;
if (i::FLAG_wasm_stream_compilation) streaming_decoder_->Abort();
+ // If there is no exception, then we do not reject the promise. The reason
+ // is that 'no exception' indicates that we are in a ScriptForbiddenScope,
+ // which means we are not allowed to reject the promise, or to execute any
+ // other JavaScript code, at the moment.
+ if (exception.IsEmpty()) return;
+
Local<Promise::Resolver> resolver = promise.As<Promise::Resolver>();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
i::HandleScope scope(i_isolate);
@@ -7973,6 +7741,14 @@ v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize",
"ArrayBuffer already externalized");
self->set_is_external(true);
+ if (self->has_guard_region()) {
+ // Since this is being externalized, the Wasm Allocation Tracker can no
+ // longer track it.
+ //
+ // TODO(eholk): Find a way to track this across externalization
+ isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace(
+ self->allocation_length());
+ }
isolate->heap()->UnregisterArrayBuffer(*self);
return GetContents();
@@ -8188,6 +7964,14 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() {
Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize",
"SharedArrayBuffer already externalized");
self->set_is_external(true);
+ if (self->has_guard_region()) {
+ // Since this is being externalized, the Wasm Allocation Tracker can no
+ // longer track it.
+ //
+ // TODO(eholk): Find a way to track this across externalization
+ isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace(
+ self->allocation_length());
+ }
isolate->heap()->UnregisterArrayBuffer(*self);
return GetContents();
}
@@ -8197,14 +7981,14 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() {
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
size_t byte_length = static_cast<size_t>(self->byte_length()->Number());
Contents contents;
+ contents.allocation_base_ = self->allocation_base();
+ contents.allocation_length_ = self->allocation_length();
+ contents.allocation_mode_ =
+ self->has_guard_region()
+ ? ArrayBufferAllocator::Allocator::AllocationMode::kReservation
+ : ArrayBufferAllocator::Allocator::AllocationMode::kNormal;
contents.data_ = self->backing_store();
contents.byte_length_ = byte_length;
- // SharedArrayBuffers never have guard regions, so their allocation and data
- // are equivalent.
- contents.allocation_base_ = self->backing_store();
- contents.allocation_length_ = byte_length;
- contents.allocation_mode_ =
- ArrayBufferAllocator::Allocator::AllocationMode::kNormal;
return contents;
}
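Both Externalize (above) and GetContents now surface the underlying allocation for SharedArrayBuffers, so buffers backed by a guard-region reservation report their true base, length and mode instead of pretending the allocation equals the data region. An embedder-side sketch of consuming the extra fields (illustrative only; it assumes the AllocationBase()/AllocationLength()/AllocationMode() accessors on Contents from the matching include/v8.h revision):

void TakeOwnership(v8::Local<v8::SharedArrayBuffer> shared) {
  v8::SharedArrayBuffer::Contents c = shared->Externalize();
  // Under kReservation the data is a slice of a larger reserved region, so a
  // later free must use the allocation fields, not Data()/ByteLength().
  void* base = c.AllocationBase();
  size_t length = c.AllocationLength();
  bool reserved = c.AllocationMode() ==
                  v8::ArrayBuffer::Allocator::AllocationMode::kReservation;
  (void)base; (void)length; (void)reserved;
}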
@@ -8727,6 +8511,11 @@ Isolate::SuppressMicrotaskExecutionScope::~SuppressMicrotaskExecutionScope() {
isolate_->handle_scope_implementer()->DecrementCallDepth();
}
+i::Object** Isolate::GetDataFromSnapshotOnce(size_t index) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
+ i::FixedArray* list = i_isolate->heap()->serialized_objects();
+ return GetSerializedDataFromFixedArray(i_isolate, list, index);
+}
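The Context and Isolate entry points for GetDataFromSnapshotOnce both route through GetSerializedDataFromFixedArray, which hands a serialized slot out exactly once and then replaces it with the hole. A minimal embedder-side sketch (illustrative only, not part of this patch; it assumes the templated GetDataFromSnapshotOnce declarations in include/v8.h that pair with these internal methods, and that index 0 was filled via SnapshotCreator::AddData when the snapshot was built):

void ReadSnapshotData(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  v8::Local<v8::Value> restored;
  if (isolate->GetDataFromSnapshotOnce<v8::Value>(0).ToLocal(&restored)) {
    // The first lookup succeeds; the slot is holed out afterwards, so a
    // second call with the same index returns an empty MaybeLocal.
  }
}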
void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -8742,6 +8531,9 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
isolate->allocator()->GetCurrentMemoryUsage();
heap_statistics->peak_malloced_memory_ =
isolate->allocator()->GetMaxMemoryUsage();
+ heap_statistics->number_of_native_contexts_ = heap->NumberOfNativeContexts();
+ heap_statistics->number_of_detached_contexts_ =
+ heap->NumberOfDetachedContexts();
heap_statistics->does_zap_garbage_ = heap->ShouldZapGarbage();
}
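GetHeapStatistics additionally reports how many native contexts are live and how many have been detached. A small sketch of reading the new counters (illustrative only; it assumes the matching number_of_native_contexts()/number_of_detached_contexts() getters added to v8::HeapStatistics alongside the fields initialized earlier in this diff):

void LogContextCounts(v8::Isolate* isolate) {
  v8::HeapStatistics stats;
  isolate->GetHeapStatistics(&stats);
  // A steadily growing detached-context count is a common sign of contexts
  // the embedder keeps alive after they should have been released.
  size_t native = stats.number_of_native_contexts();
  size_t detached = stats.number_of_detached_contexts();
  (void)native; (void)detached;
}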
@@ -8870,7 +8662,6 @@ void Isolate::RemoveCallCompletedCallback(CallCompletedCallback callback) {
isolate->RemoveCallCompletedCallback(callback);
}
-
void Isolate::AddCallCompletedCallback(
DeprecatedCallCompletedCallback callback) {
AddCallCompletedCallback(reinterpret_cast<CallCompletedCallback>(callback));
@@ -8985,15 +8776,6 @@ void Isolate::SetAddHistogramSampleFunction(
}
-bool Isolate::IdleNotification(int idle_time_in_ms) {
- // Returning true tells the caller that it need not
- // continue to call IdleNotification.
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- if (!i::FLAG_use_idle_notification) return true;
- return isolate->heap()->IdleNotification(idle_time_in_ms);
-}
-
-
bool Isolate::IdleNotificationDeadline(double deadline_in_seconds) {
// Returning true tells the caller that it need not
// continue to call IdleNotification.
@@ -9346,14 +9128,6 @@ Local<Message> Exception::CreateMessage(Isolate* isolate,
}
-Local<Message> Exception::CreateMessage(Local<Value> exception) {
- i::Handle<i::Object> obj = Utils::OpenHandle(*exception);
- if (!obj->IsHeapObject()) return Local<Message>();
- i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
- return CreateMessage(reinterpret_cast<Isolate*>(isolate), exception);
-}
-
-
Local<StackTrace> Exception::GetStackTrace(Local<Value> exception) {
i::Handle<i::Object> obj = Utils::OpenHandle(*exception);
if (!obj->IsJSObject()) return Local<StackTrace>();
@@ -9664,9 +9438,9 @@ bool debug::Script::GetPossibleBreakpoints(
CHECK(!start.IsEmpty());
i::Handle<i::Script> script = Utils::OpenHandle(this);
if (script->type() == i::Script::TYPE_WASM) {
- i::Handle<i::WasmCompiledModule> compiled_module(
- i::WasmCompiledModule::cast(script->wasm_compiled_module()));
- return compiled_module->GetPossibleBreakpoints(start, end, locations);
+ i::WasmSharedModuleData* shared =
+ i::WasmCompiledModule::cast(script->wasm_compiled_module())->shared();
+ return shared->GetPossibleBreakpoints(start, end, locations);
}
i::Script::InitLineEnds(script);
@@ -9715,6 +9489,7 @@ int debug::Script::GetSourceOffset(const debug::Location& location) const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
if (script->type() == i::Script::TYPE_WASM) {
return i::WasmCompiledModule::cast(script->wasm_compiled_module())
+ ->shared()
->GetFunctionOffset(location.GetLineNumber()) +
location.GetColumnNumber();
}
@@ -9784,8 +9559,9 @@ int debug::WasmScript::NumFunctions() const {
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmCompiledModule* compiled_module =
i::WasmCompiledModule::cast(script->wasm_compiled_module());
- DCHECK_GE(i::kMaxInt, compiled_module->module()->functions.size());
- return static_cast<int>(compiled_module->module()->functions.size());
+ i::wasm::WasmModule* module = compiled_module->shared()->module();
+ DCHECK_GE(i::kMaxInt, module->functions.size());
+ return static_cast<int>(module->functions.size());
}
int debug::WasmScript::NumImportedFunctions() const {
@@ -9794,8 +9570,9 @@ int debug::WasmScript::NumImportedFunctions() const {
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmCompiledModule* compiled_module =
i::WasmCompiledModule::cast(script->wasm_compiled_module());
- DCHECK_GE(i::kMaxInt, compiled_module->module()->num_imported_functions);
- return static_cast<int>(compiled_module->module()->num_imported_functions);
+ i::wasm::WasmModule* module = compiled_module->shared()->module();
+ DCHECK_GE(i::kMaxInt, module->num_imported_functions);
+ return static_cast<int>(module->num_imported_functions);
}
std::pair<int, int> debug::WasmScript::GetFunctionRange(
@@ -9805,10 +9582,10 @@ std::pair<int, int> debug::WasmScript::GetFunctionRange(
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmCompiledModule* compiled_module =
i::WasmCompiledModule::cast(script->wasm_compiled_module());
+ i::wasm::WasmModule* module = compiled_module->shared()->module();
DCHECK_LE(0, function_index);
- DCHECK_GT(compiled_module->module()->functions.size(), function_index);
- i::wasm::WasmFunction& func =
- compiled_module->module()->functions[function_index];
+ DCHECK_GT(module->functions.size(), function_index);
+ i::wasm::WasmFunction& func = module->functions[function_index];
DCHECK_GE(i::kMaxInt, func.code.offset());
DCHECK_GE(i::kMaxInt, func.code.end_offset());
return std::make_pair(static_cast<int>(func.code.offset()),
@@ -9822,7 +9599,7 @@ debug::WasmDisassembly debug::WasmScript::DisassembleFunction(
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmCompiledModule* compiled_module =
i::WasmCompiledModule::cast(script->wasm_compiled_module());
- return compiled_module->DisassembleFunction(function_index);
+ return compiled_module->shared()->DisassembleFunction(function_index);
}
debug::Location::Location(int line_number, int column_number)
@@ -9851,9 +9628,6 @@ void debug::GetLoadedScripts(v8::Isolate* v8_isolate,
PersistentValueVector<debug::Script>& scripts) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- // TODO(kozyatinskiy): remove this GC once tests are dealt with.
- isolate->heap()->CollectAllGarbage(i::Heap::kMakeHeapIterableMask,
- i::GarbageCollectionReason::kDebugger);
{
i::DisallowHeapAllocation no_gc;
i::Script::Iterator iterator(isolate);
@@ -10913,7 +10687,7 @@ void InvokeAccessorGetterCallback(
// Leaving JavaScript.
Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate,
- &RuntimeCallStats::AccessorGetterCallback);
+ RuntimeCallCounterId::kAccessorGetterCallback);
Address getter_address = reinterpret_cast<Address>(reinterpret_cast<intptr_t>(
getter));
VMState<EXTERNAL> state(isolate);
@@ -10926,7 +10700,7 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
v8::FunctionCallback callback) {
Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate,
- &RuntimeCallStats::InvokeFunctionCallback);
+ RuntimeCallCounterId::kInvokeFunctionCallback);
Address callback_address =
reinterpret_cast<Address>(reinterpret_cast<intptr_t>(callback));
VMState<EXTERNAL> state(isolate);
@@ -10934,6 +10708,25 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
callback(info);
}
+// Undefine macros for jumbo build.
+#undef LOG_API
+#undef ENTER_V8_DO_NOT_USE
+#undef ENTER_V8_HELPER_DO_NOT_USE
+#undef PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE
+#undef PREPARE_FOR_EXECUTION_WITH_CONTEXT
+#undef PREPARE_FOR_EXECUTION
+#undef ENTER_V8
+#undef ENTER_V8_NO_SCRIPT
+#undef ENTER_V8_NO_SCRIPT_NO_EXCEPTION
+#undef ENTER_V8_FOR_NEW_CONTEXT
+#undef EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE
+#undef RETURN_ON_FAILED_EXECUTION
+#undef RETURN_ON_FAILED_EXECUTION_PRIMITIVE
+#undef RETURN_TO_LOCAL_UNCHECKED
+#undef RETURN_ESCAPED
+#undef SET_FIELD_WRAPPED
+#undef NEW_STRING
+#undef CALLBACK_SETTER
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 0a70ac83e4..7bd03c37da 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -11,6 +11,7 @@
#include "src/detachable-vector.h"
#include "src/factory.h"
#include "src/isolate.h"
+#include "src/objects/js-collection.h"
namespace v8 {
@@ -404,6 +405,7 @@ class HandleScopeImplementer {
call_depth_(0),
microtasks_depth_(0),
microtasks_suppressions_(0),
+ entered_contexts_count_(0),
entered_context_count_during_microtasks_(0),
#ifdef DEBUG
debug_microtasks_depth_(0),
@@ -530,6 +532,7 @@ class HandleScopeImplementer {
int call_depth_;
int microtasks_depth_;
int microtasks_suppressions_;
+ size_t entered_contexts_count_;
size_t entered_context_count_during_microtasks_;
#ifdef DEBUG
int debug_microtasks_depth_;
@@ -545,10 +548,25 @@ class HandleScopeImplementer {
friend class DeferredHandles;
friend class DeferredHandleScope;
+ friend class HandleScopeImplementerOffsets;
DISALLOW_COPY_AND_ASSIGN(HandleScopeImplementer);
};
+class HandleScopeImplementerOffsets {
+ public:
+ enum Offsets {
+ kMicrotaskContext = offsetof(HandleScopeImplementer, microtask_context_),
+ kEnteredContexts = offsetof(HandleScopeImplementer, entered_contexts_),
+ kEnteredContextsCount =
+ offsetof(HandleScopeImplementer, entered_contexts_count_),
+ kEnteredContextCountDuringMicrotasks = offsetof(
+ HandleScopeImplementer, entered_context_count_during_microtasks_)
+ };
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(HandleScopeImplementerOffsets);
+};
const int kHandleBlockSize = v8::internal::KB - 2; // fit in one page
@@ -583,9 +601,13 @@ bool HandleScopeImplementer::HasSavedContexts() {
void HandleScopeImplementer::EnterContext(Handle<Context> context) {
entered_contexts_.push_back(*context);
+ entered_contexts_count_ = entered_contexts_.size();
}
-void HandleScopeImplementer::LeaveContext() { entered_contexts_.pop_back(); }
+void HandleScopeImplementer::LeaveContext() {
+ entered_contexts_.pop_back();
+ entered_contexts_count_ = entered_contexts_.size();
+}
bool HandleScopeImplementer::LastEnteredContextWas(Handle<Context> context) {
return !entered_contexts_.empty() && entered_contexts_.back() == *context;
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index 3d58b8249b..d01e77314a 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -85,7 +85,7 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
\
V8_NOINLINE static Type Stats_##Name(int args_length, Object** args_object, \
Isolate* isolate) { \
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Name); \
+ RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::k##Name); \
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \
"V8.Runtime_" #Name); \
Arguments args(args_length, args_object); \
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index ce6b759d30..f420f2e5cb 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -68,7 +68,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
@@ -85,7 +85,7 @@ Address RelocInfo::target_address_address() {
Address RelocInfo::constant_pool_entry_address() {
DCHECK(IsInConstantPool());
- return Assembler::constant_pool_entry_address(pc_, host_->constant_pool());
+ return Assembler::constant_pool_entry_address(pc_, constant_pool_);
}
@@ -95,21 +95,21 @@ int RelocInfo::target_address_size() {
HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(
- reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
+ return HeapObject::cast(reinterpret_cast<Object*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<HeapObject>(
- reinterpret_cast<HeapObject**>(Assembler::target_address_at(pc_, host_)));
+ return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
+ Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -122,7 +122,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -158,7 +158,7 @@ void RelocInfo::WipeOut(Isolate* isolate) {
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = nullptr;
} else {
- Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
}
}
@@ -382,18 +382,6 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
}
}
-Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- return target_address_at(pc, constant_pool);
-}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
-}
-
EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
} // namespace internal
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 8c22974ca3..a615d67496 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -72,7 +72,7 @@ static unsigned CpuFeaturesFromCommandLine() {
" armv7+sudiv\n"
" armv7\n"
" armv6\n");
- CHECK(false);
+ FATAL("arm-arch");
}
// If any of the old (deprecated) flags are specified, print a warning, but
@@ -339,21 +339,23 @@ bool RelocInfo::IsInConstantPool() {
}
Address RelocInfo::embedded_address() const {
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
uint32_t RelocInfo::embedded_size() const {
- return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
+ return reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_, constant_pool_));
}
void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
+ flush_mode);
}
void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
@@ -474,7 +476,6 @@ void NeonMemOperand::SetAlignment(int align) {
break;
default:
UNREACHABLE();
- align_ = 0;
break;
}
}
@@ -519,23 +520,23 @@ const Instr kBlxRegMask =
const Instr kBlxRegPattern =
B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kBlxIp = al | kBlxRegPattern | ip.code();
-const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
-const Instr kMovMvnPattern = 0xd * B21;
+const Instr kMovMvnMask = 0x6D * B21 | 0xF * B16;
+const Instr kMovMvnPattern = 0xD * B21;
const Instr kMovMvnFlip = B22;
-const Instr kMovLeaveCCMask = 0xdff * B16;
-const Instr kMovLeaveCCPattern = 0x1a0 * B16;
+const Instr kMovLeaveCCMask = 0xDFF * B16;
+const Instr kMovLeaveCCPattern = 0x1A0 * B16;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovtPattern = 0x34 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
-const Instr kMovImmedMask = 0x7f * B21;
-const Instr kMovImmedPattern = 0x1d * B21;
-const Instr kOrrImmedMask = 0x7f * B21;
-const Instr kOrrImmedPattern = 0x1c * B21;
-const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
+const Instr kMovImmedMask = 0x7F * B21;
+const Instr kMovImmedPattern = 0x1D * B21;
+const Instr kOrrImmedMask = 0x7F * B21;
+const Instr kOrrImmedPattern = 0x1C * B21;
+const Instr kCmpCmnMask = 0xDD * B20 | 0xF * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kAddSubFlip = 0x6 * B21;
-const Instr kAndBicFlip = 0xe * B21;
+const Instr kAndBicFlip = 0xE * B21;
// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kLdrRegFpOffsetPattern = al | B26 | L | Offset | fp.code() * B16;
@@ -543,7 +544,7 @@ const Instr kStrRegFpOffsetPattern = al | B26 | Offset | fp.code() * B16;
const Instr kLdrRegFpNegOffsetPattern =
al | B26 | L | NegOffset | fp.code() * B16;
const Instr kStrRegFpNegOffsetPattern = al | B26 | NegOffset | fp.code() * B16;
-const Instr kLdrStrInstrTypeMask = 0xffff0000;
+const Instr kLdrStrInstrTypeMask = 0xFFFF0000;
Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
: AssemblerBase(isolate_data, buffer, buffer_size),
@@ -1046,7 +1047,7 @@ bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
// imm32 must be unsigned.
for (int rot = 0; rot < 16; rot++) {
uint32_t imm8 = base::bits::RotateLeft32(imm32, 2 * rot);
- if ((imm8 <= 0xff)) {
+ if ((imm8 <= 0xFF)) {
*rotate_imm = rot;
*immed_8 = imm8;
return true;
@@ -1172,7 +1173,7 @@ void Assembler::Move32BitImmediate(Register rd, const Operand& x,
if (CpuFeatures::IsSupported(ARMv7)) {
uint32_t imm32 = static_cast<uint32_t>(x.immediate());
CpuFeatureScope scope(this, ARMv7);
- movw(target, imm32 & 0xffff, cond);
+ movw(target, imm32 & 0xFFFF, cond);
movt(target, imm32 >> 16, cond);
}
if (target.code() != rd.code()) {
@@ -1187,7 +1188,7 @@ void Assembler::Move32BitImmediate(Register rd, const Operand& x,
immediate = x.immediate();
}
ConstantPoolAddEntry(pc_offset(), x.rmode_, immediate);
- ldr(rd, MemOperand(pc, 0), cond);
+ ldr_pcrel(rd, 0, cond);
}
}
@@ -1234,7 +1235,7 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
// This means that finding the even number of trailing zeroes of the
// immediate allows us to more efficiently split it:
int trailing_zeroes = base::bits::CountTrailingZeros(imm) & ~1u;
- uint32_t mask = (0xff << trailing_zeroes);
+ uint32_t mask = (0xFF << trailing_zeroes);
add(rd, rd, Operand(imm & mask), LeaveCC, cond);
imm = imm & ~mask;
} while (!ImmediateFitsAddrMode1Instruction(imm));
@@ -1294,6 +1295,9 @@ bool Assembler::AddrMode1TryEncodeOperand(Instr* instr, const Operand& x) {
void Assembler::AddrMode2(Instr instr, Register rd, const MemOperand& x) {
DCHECK((instr & ~(kCondMask | B | L)) == B26);
+ // This method does not handle pc-relative addresses. ldr_pcrel() should be
+ // used instead.
+ DCHECK(x.rn_ != pc);
int am = x.am_;
if (!x.rm_.is_valid()) {
// Immediate offset.
@@ -1331,6 +1335,9 @@ void Assembler::AddrMode2(Instr instr, Register rd, const MemOperand& x) {
void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
DCHECK(x.rn_.is_valid());
+ // This method does not handle pc-relative addresses. ldr_pcrel() should be
+ // used instead.
+ DCHECK(x.rn_ != pc);
int am = x.am_;
bool is_load = (instr & L) == L;
if (!x.rm_.is_valid()) {
@@ -1353,7 +1360,7 @@ void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
return;
}
DCHECK_GE(offset_8, 0); // no masking needed
- instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
+ instr |= B | (offset_8 >> 4) * B8 | (offset_8 & 0xF);
} else if (x.shift_imm_ != 0) {
// Scaled register offsets are not supported, compute the offset separately
// to a scratch register.
@@ -1709,8 +1716,8 @@ void Assembler::sdiv(Register dst, Register src1, Register src2,
Condition cond) {
DCHECK(dst != pc && src1 != pc && src2 != pc);
DCHECK(IsEnabled(SUDIV));
- emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
- src2.code()*B8 | B4 | src1.code());
+ emit(cond | B26 | B25 | B24 | B20 | dst.code() * B16 | 0xF * B12 |
+ src2.code() * B8 | B4 | src1.code());
}
@@ -1718,7 +1725,7 @@ void Assembler::udiv(Register dst, Register src1, Register src2,
Condition cond) {
DCHECK(dst != pc && src1 != pc && src2 != pc);
DCHECK(IsEnabled(SUDIV));
- emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xf * B12 |
+ emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xF * B12 |
src2.code() * B8 | B4 | src1.code());
}
@@ -1742,7 +1749,7 @@ void Assembler::smmla(Register dst, Register src1, Register src2, Register srcA,
void Assembler::smmul(Register dst, Register src1, Register src2,
Condition cond) {
DCHECK(dst != pc && src1 != pc && src2 != pc);
- emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 | 0xf * B12 |
+ emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 | 0xF * B12 |
src2.code() * B8 | B4 | src1.code());
}
@@ -1824,8 +1831,8 @@ void Assembler::usat(Register dst,
sh = 1;
}
- emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
- src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
+ emit(cond | 0x6 * B24 | 0xE * B20 | satpos * B16 | dst.code() * B12 |
+ src.shift_imm_ * B7 | sh * B6 | 0x1 * B4 | src.rm_.code());
}
@@ -1844,8 +1851,8 @@ void Assembler::ubfx(Register dst,
DCHECK(dst != pc && src != pc);
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
- emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
- lsb*B7 | B6 | B4 | src.code());
+ emit(cond | 0xF * B23 | B22 | B21 | (width - 1) * B16 | dst.code() * B12 |
+ lsb * B7 | B6 | B4 | src.code());
}
@@ -1863,8 +1870,8 @@ void Assembler::sbfx(Register dst,
DCHECK(dst != pc && src != pc);
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
- emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
- lsb*B7 | B6 | B4 | src.code());
+ emit(cond | 0xF * B23 | B21 | (width - 1) * B16 | dst.code() * B12 |
+ lsb * B7 | B6 | B4 | src.code());
}
@@ -1878,7 +1885,7 @@ void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
int msb = lsb + width - 1;
- emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
+ emit(cond | 0x1F * B22 | msb * B16 | dst.code() * B12 | lsb * B7 | B4 | 0xF);
}
@@ -1896,7 +1903,7 @@ void Assembler::bfi(Register dst,
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
int msb = lsb + width - 1;
- emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
+ emit(cond | 0x1F * B22 | msb * B16 | dst.code() * B12 | lsb * B7 | B4 |
src.code());
}
@@ -2073,8 +2080,8 @@ void Assembler::mrs(Register dst, SRegister s, Condition cond) {
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
Condition cond) {
- DCHECK_NE(fields & 0x000f0000, 0); // At least one field must be set.
- DCHECK(((fields & 0xfff0ffff) == CPSR) || ((fields & 0xfff0ffff) == SPSR));
+ DCHECK_NE(fields & 0x000F0000, 0); // At least one field must be set.
+ DCHECK(((fields & 0xFFF0FFFF) == CPSR) || ((fields & 0xFFF0FFFF) == SPSR));
Instr instr;
if (src.IsImmediate()) {
// Immediate.
@@ -2159,13 +2166,23 @@ void Assembler::strd(Register src1, Register src2,
AddrMode3(cond | B7 | B6 | B5 | B4, src1, dst);
}
+void Assembler::ldr_pcrel(Register dst, int imm12, Condition cond) {
+ AddrMode am = Offset;
+ if (imm12 < 0) {
+ imm12 = -imm12;
+ am = NegOffset;
+ }
+ DCHECK(is_uint12(imm12));
+ emit(cond | B26 | am | L | pc.code() * B16 | dst.code() * B12 | imm12);
+}
+
// Load/Store exclusive instructions.
void Assembler::ldrex(Register dst, Register src, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.75.
// cond(31-28) | 00011001(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
DCHECK(dst != pc);
DCHECK(src != pc);
- emit(cond | B24 | B23 | B20 | src.code() * B16 | dst.code() * B12 | 0xf9f);
+ emit(cond | B24 | B23 | B20 | src.code() * B16 | dst.code() * B12 | 0xF9F);
}
void Assembler::strex(Register src1, Register src2, Register dst,
@@ -2178,7 +2195,7 @@ void Assembler::strex(Register src1, Register src2, Register dst,
DCHECK(src2 != pc);
DCHECK(src1 != dst);
DCHECK(src1 != src2);
- emit(cond | B24 | B23 | dst.code() * B16 | src1.code() * B12 | 0xf9 * B4 |
+ emit(cond | B24 | B23 | dst.code() * B16 | src1.code() * B12 | 0xF9 * B4 |
src2.code());
}
@@ -2188,7 +2205,7 @@ void Assembler::ldrexb(Register dst, Register src, Condition cond) {
DCHECK(dst != pc);
DCHECK(src != pc);
emit(cond | B24 | B23 | B22 | B20 | src.code() * B16 | dst.code() * B12 |
- 0xf9f);
+ 0xF9F);
}
void Assembler::strexb(Register src1, Register src2, Register dst,
@@ -2202,7 +2219,7 @@ void Assembler::strexb(Register src1, Register src2, Register dst,
DCHECK(src1 != dst);
DCHECK(src1 != src2);
emit(cond | B24 | B23 | B22 | dst.code() * B16 | src1.code() * B12 |
- 0xf9 * B4 | src2.code());
+ 0xF9 * B4 | src2.code());
}
void Assembler::ldrexh(Register dst, Register src, Condition cond) {
@@ -2211,7 +2228,7 @@ void Assembler::ldrexh(Register dst, Register src, Condition cond) {
DCHECK(dst != pc);
DCHECK(src != pc);
emit(cond | B24 | B23 | B22 | B21 | B20 | src.code() * B16 |
- dst.code() * B12 | 0xf9f);
+ dst.code() * B12 | 0xF9F);
}
void Assembler::strexh(Register src1, Register src2, Register dst,
@@ -2225,7 +2242,7 @@ void Assembler::strexh(Register src1, Register src2, Register dst,
DCHECK(src1 != dst);
DCHECK(src1 != src2);
emit(cond | B24 | B23 | B22 | B21 | dst.code() * B16 | src1.code() * B12 |
- 0xf9 * B4 | src2.code());
+ 0xF9 * B4 | src2.code());
}
// Preload instructions.
@@ -2242,8 +2259,8 @@ void Assembler::pld(const MemOperand& address) {
U = 0;
}
DCHECK_LT(offset, 4096);
- emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
- 0xf*B12 | offset);
+ emit(kSpecialCondition | B26 | B24 | U | B22 | B20 |
+ address.rn().code() * B16 | 0xF * B12 | offset);
}
@@ -2305,7 +2322,7 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) {
void Assembler::bkpt(uint32_t imm16) {
DCHECK(is_uint16(imm16));
- emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
+ emit(al | B24 | B21 | (imm16 >> 4) * B8 | BKPT | (imm16 & 0xF));
}
@@ -2318,7 +2335,7 @@ void Assembler::svc(uint32_t imm24, Condition cond) {
void Assembler::dmb(BarrierOption option) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Details available in ARM DDI 0406C.b, A8-378.
- emit(kSpecialCondition | 0x57ff * B12 | 5 * B4 | option);
+ emit(kSpecialCondition | 0x57FF * B12 | 5 * B4 | option);
} else {
// Details available in ARM DDI 0406C.b, B3-1750.
// CP15DMB: CRn=c7, opc1=0, CRm=c10, opc2=5, Rt is ignored.
@@ -2330,7 +2347,7 @@ void Assembler::dmb(BarrierOption option) {
void Assembler::dsb(BarrierOption option) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Details available in ARM DDI 0406C.b, A8-380.
- emit(kSpecialCondition | 0x57ff * B12 | 4 * B4 | option);
+ emit(kSpecialCondition | 0x57FF * B12 | 4 * B4 | option);
} else {
// Details available in ARM DDI 0406C.b, B3-1750.
// CP15DSB: CRn=c7, opc1=0, CRm=c10, opc2=4, Rt is ignored.
@@ -2342,7 +2359,7 @@ void Assembler::dsb(BarrierOption option) {
void Assembler::isb(BarrierOption option) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Details available in ARM DDI 0406C.b, A8-389.
- emit(kSpecialCondition | 0x57ff * B12 | 6 * B4 | option);
+ emit(kSpecialCondition | 0x57FF * B12 | 6 * B4 | option);
} else {
// Details available in ARM DDI 0406C.b, B3-1750.
// CP15ISB: CRn=c7, opc1=0, CRm=c5, opc2=4, Rt is ignored.
@@ -2728,7 +2745,7 @@ void Assembler::vstm(BlockAddrMode am, Register base, SwVfpRegister first,
static void DoubleAsTwoUInt32(Double d, uint32_t* lo, uint32_t* hi) {
uint64_t i = d.AsUint64();
- *lo = i & 0xffffffff;
+ *lo = i & 0xFFFFFFFF;
*hi = i >> 32;
}
@@ -2757,12 +2774,12 @@ static bool FitsVmovFPImmediate(Double d, uint32_t* encoding) {
DoubleAsTwoUInt32(d, &lo, &hi);
// The most obvious constraint is the long block of zeroes.
- if ((lo != 0) || ((hi & 0xffff) != 0)) {
+ if ((lo != 0) || ((hi & 0xFFFF) != 0)) {
return false;
}
// Bits 61:54 must be all clear or all set.
- if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
+ if (((hi & 0x3FC00000) != 0) && ((hi & 0x3FC00000) != 0x3FC00000)) {
return false;
}
@@ -2773,7 +2790,7 @@ static bool FitsVmovFPImmediate(Double d, uint32_t* encoding) {
// Create the encoded immediate in the form:
// [00000000,0000abcd,00000000,0000efgh]
- *encoding = (hi >> 16) & 0xf; // Low nybble.
+ *encoding = (hi >> 16) & 0xF; // Low nybble.
*encoding |= (hi >> 4) & 0x70000; // Low three bits of the high nybble.
*encoding |= (hi >> 12) & 0x80000; // Top bit of the high nybble.
@@ -2852,8 +2869,7 @@ void Assembler::vmov(const DwVfpRegister dst, Double imm,
// We only have one spare scratch register.
mov(scratch, Operand(lo));
vmov(dst, VmovIndexLo, scratch);
- if (((lo & 0xffff) == (hi & 0xffff)) &&
- CpuFeatures::IsSupported(ARMv7)) {
+ if (((lo & 0xFFFF) == (hi & 0xFFFF)) && CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(this, ARMv7);
movt(scratch, hi >> 16);
} else {
@@ -3193,7 +3209,7 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
dst.split_code(&vd, &d);
int imm5 = 32 - fraction_bits;
int i = imm5 & 1;
- int imm4 = (imm5 >> 1) & 0xf;
+ int imm4 = (imm5 >> 1) & 0xF;
emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
}
@@ -4973,12 +4989,12 @@ Instr Assembler::GetMovWPattern() { return kMovwPattern; }
Instr Assembler::EncodeMovwImmediate(uint32_t immediate) {
DCHECK_LT(immediate, 0x10000);
- return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
+ return ((immediate & 0xF000) << 4) | (immediate & 0xFFF);
}
Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
- instruction &= ~EncodeMovwImmediate(0xffff);
+ instruction &= ~EncodeMovwImmediate(0xFFFF);
return instruction | EncodeMovwImmediate(immediate);
}
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 9d8cb4c05c..8b95aad886 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -173,6 +173,7 @@ GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
constexpr Register no_reg = Register::no_reg();
+constexpr bool kPadArguments = false;
constexpr bool kSimpleFPAliasing = false;
constexpr bool kSimdMaskRegisters = false;
@@ -652,10 +653,6 @@ class Assembler : public AssemblerBase {
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
- INLINE(static Address target_address_at(Address pc, Code* code));
- INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -907,6 +904,9 @@ class Assembler : public AssemblerBase {
Register src2,
const MemOperand& dst, Condition cond = al);
+ // Load literal from a pc relative address.
+ void ldr_pcrel(Register dst, int imm12, Condition cond = al);
+
// Load/Store exclusive instructions
void ldrex(Register dst, Register src, Condition cond = al);
void strex(Register src1, Register src2, Register dst, Condition cond = al);
@@ -1344,6 +1344,10 @@ class Assembler : public AssemblerBase {
void pop();
+ void vpush(QwNeonRegister src, Condition cond = al) {
+ vstm(db_w, sp, src.low(), src.high(), cond);
+ }
+
void vpush(DwVfpRegister src, Condition cond = al) {
vstm(db_w, sp, src, src, cond);
}
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 2add525abd..ee706c7656 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -83,7 +83,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
if (masm->emit_debug_code()) {
// Scratch is exponent - 1.
__ cmp(scratch, Operand(30 - 1));
- __ Check(ge, kUnexpectedValue);
+ __ Check(ge, AbortReason::kUnexpectedValue);
}
// We don't have to handle cases where 0 <= exponent <= 20 for which we would
@@ -116,8 +116,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// double_high LSR 31 equals zero.
// New result = (result eor 0) + 0 = result.
// If the input was negative, we have to negate the result.
- // Input_high ASR 31 equals 0xffffffff and double_high LSR 31 equals 1.
- // New result = (result eor 0xffffffff) + 1 = 0 - result.
+ // Input_high ASR 31 equals 0xFFFFFFFF and double_high LSR 31 equals 1.
+ // New result = (result eor 0xFFFFFFFF) + 1 = 0 - result.
__ eor(result_reg, result_reg, Operand(double_high, ASR, 31));
__ add(result_reg, result_reg, Operand(double_high, LSR, 31));
@@ -414,6 +414,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Set up the reserved register for 0.0.
__ vmov(kDoubleRegZero, Double(0.0));
+ __ InitializeRootRegister();
+
// Get address of argv, see stm above.
// r0: code entry
// r1: function
@@ -509,12 +511,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// r2: receiver
// r3: argc
// r4: argv
- if (type() == StackFrame::CONSTRUCT_ENTRY) {
- __ Call(BUILTIN_CODE(isolate(), JSConstructEntryTrampoline),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(BUILTIN_CODE(isolate(), JSEntryTrampoline), RelocInfo::CODE_TARGET);
- }
+ __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@@ -681,7 +678,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -723,7 +720,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
if (FLAG_debug_code) {
__ ldr(r5, FieldMemOperand(r2, 0));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, kExpectedAllocationSite);
+ __ Assert(eq, AbortReason::kExpectedAllocationSite);
}
// Save the resulting elements kind in type info. We can't just store r3
@@ -747,7 +744,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -824,9 +821,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ tst(r4, Operand(kSmiTagMask));
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r4, r4, r5, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
// We should either have undefined in r2 or a valid AllocationSite
__ AssertUndefinedOrAllocationSite(r2, r4);
@@ -904,9 +901,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ tst(r3, Operand(kSmiTagMask));
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r3, r3, r4, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
// Figure out the right elements kind
@@ -922,8 +919,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ cmp(r3, Operand(PACKED_ELEMENTS));
__ b(eq, &done);
__ cmp(r3, Operand(HOLEY_ELEMENTS));
- __ Assert(eq,
- kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ Assert(
+ eq,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
@@ -1025,7 +1023,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
if (__ emit_debug_code()) {
__ ldr(r1, MemOperand(r9, kLevelOffset));
__ cmp(r1, r6);
- __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
}
__ sub(r6, r6, Operand(1));
__ str(r6, MemOperand(r9, kLevelOffset));
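The DoubleToIStub hunk above relies on a branch-free conditional negate: XOR the result with double_high shifted arithmetically by 31 (0 or 0xFFFFFFFF), then add double_high shifted logically by 31 (0 or 1). A minimal standalone C++ sketch of that arithmetic, purely as an illustration and not V8 code:

#include <cassert>
#include <cstdint>

// Returns `result` unchanged when double_high is non-negative, and the
// two's-complement negation of `result` when double_high is negative.
uint32_t ConditionalNegate(uint32_t result, uint32_t double_high) {
  uint32_t asr31 =
      static_cast<uint32_t>(static_cast<int32_t>(double_high) >> 31);  // 0 or 0xFFFFFFFF
  uint32_t lsr31 = double_high >> 31;                                  // 0 or 1
  return (result ^ asr31) + lsr31;  // (x ^ 0) + 0 == x, (x ^ ~0) + 1 == -x
}

int main() {
  assert(ConditionalNegate(42u, 0x00000000u) == 42u);
  assert(ConditionalNegate(42u, 0x80000000u) == static_cast<uint32_t>(-42));
  return 0;
}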
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index f7e29ace49..9fb2eb4e8d 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -24,8 +24,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
return stub;
#else
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -170,8 +169,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@@ -184,8 +182,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
return stub;
#else
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -261,8 +258,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
masm.GetCode(isolate, &desc);
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
@@ -273,8 +269,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -290,8 +285,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
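The codegen-arm.cc hunks above move the stub buffers to AllocatePage plus SetPermissions(..., PageAllocator::kReadExecute): assemble into writable memory, then flip the page to read+execute. A rough sketch of the same write-then-seal pattern using plain POSIX calls, as an illustration only and not V8's PageAllocator API:

#include <sys/mman.h>
#include <cstddef>
#include <cstring>

// Copies `size` bytes of machine code into a fresh mapping and makes it
// executable. Assumes one mapping suffices; on ARM the caller would also
// need to flush the instruction cache before running the code.
void* EmitStub(const unsigned char* code, size_t size) {
  void* page = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (page == MAP_FAILED) return nullptr;
  memcpy(page, code, size);  // write while the page is still writable
  if (mprotect(page, size, PROT_READ | PROT_EXEC) != 0) {  // then seal it
    munmap(page, size);
    return nullptr;
  }
  return page;
}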
diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc
index c788d33ef2..b50948fc36 100644
--- a/deps/v8/src/arm/constants-arm.cc
+++ b/deps/v8/src/arm/constants-arm.cc
@@ -20,7 +20,7 @@ Float64 Instruction::DoubleImmedVmov() const {
// where B = ~b. Only the high 16 bits are affected.
uint64_t high16;
high16 = (Bits(17, 16) << 4) | Bits(3, 0); // xxxxxxxx,xxcdefgh.
- high16 |= (0xff * Bit(18)) << 6; // xxbbbbbb,bbxxxxxx.
+ high16 |= (0xFF * Bit(18)) << 6; // xxbbbbbb,bbxxxxxx.
high16 |= (Bit(18) ^ 1) << 14; // xBxxxxxx,xxxxxxxx.
high16 |= Bit(19) << 15; // axxxxxxx,xxxxxxxx.
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 20cf8e4d5e..1c865afb09 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -34,9 +34,6 @@ inline int DecodeConstantPoolLength(int instr) {
return ((instr >> 4) & 0xfff0) | (instr & 0xf);
}
-// Used in code age prologue - ldr(pc, MemOperand(pc, -4))
-const int kCodeAgeJumpInstruction = 0xe51ff004;
-
// Number of registers in normal ARM mode.
const int kNumRegisters = 16;
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 81224c5fcb..9a21ef862c 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -245,9 +245,9 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// Note that registers are still live when jumping to an entry.
// We need to be able to generate immediates up to kMaxNumberOfEntries. On
- // ARMv7, we can use movw (with a maximum immediate of 0xffff). On ARMv6, we
+ // ARMv7, we can use movw (with a maximum immediate of 0xFFFF). On ARMv6, we
// need two instructions.
- STATIC_ASSERT((kMaxNumberOfEntries - 1) <= 0xffff);
+ STATIC_ASSERT((kMaxNumberOfEntries - 1) <= 0xFFFF);
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
if (CpuFeatures::IsSupported(ARMv7)) {
@@ -263,7 +263,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&done);
} else {
// We want to keep table_entry_size_ == 8 (since this is the common case),
- // but we need two instructions to load most immediates over 0xff. To handle
+ // but we need two instructions to load most immediates over 0xFF. To handle
// this, we set the low byte in the main table, and then set the high byte
// in a separate table if necessary.
Label high_fixes[256];
@@ -272,7 +272,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
- __ mov(scratch, Operand(i & 0xff)); // Set the low byte.
+ __ mov(scratch, Operand(i & 0xFF)); // Set the low byte.
__ b(&high_fixes[i >> 8]); // Jump to the secondary table.
DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
}
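The ARMv6 path in the deoptimizer hunk above keeps each main-table entry at two instructions by encoding only the low byte of the entry index there and recovering the high byte from which secondary "high fix" block the entry branches to. The index split and recombination, conceptually (plain C++ illustration of the arithmetic, not generated code):

#include <cassert>

// Splitting: entry i emits `mov scratch, #(i & 0xFF)` and branches to
// high-fix block (i >> 8). Recombining yields the original index.
int RecoverEntryIndex(int low_byte, int high_fix_block) {
  return (high_fix_block << 8) | low_byte;
}

int main() {
  int i = 0x1234;
  assert(RecoverEntryIndex(i & 0xFF, i >> 8) == i);
  return 0;
}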
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 05adc37f61..9951136561 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -541,7 +541,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
// 'msg: for simulator break instructions
DCHECK(STRING_STARTS_WITH(format, "msg"));
byte* str =
- reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff);
+ reinterpret_cast<byte*>(instr->InstructionBits() & 0x0FFFFFFF);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s", converter_.NameInCode(str));
return 3;
@@ -819,7 +819,7 @@ void Decoder::DecodeType01(Instruction* instr) {
Unknown(instr); // not used by V8
}
}
- } else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) {
+ } else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xD) == 0xD)) {
// ldrd, strd
switch (instr->PUField()) {
case da_x: {
@@ -905,7 +905,7 @@ void Decoder::DecodeType01(Instruction* instr) {
}
} else if ((type == 0) && instr->IsMiscType0()) {
if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 2) &&
- (instr->Bits(15, 4) == 0xf00)) {
+ (instr->Bits(15, 4) == 0xF00)) {
Format(instr, "msr'cond 'spec_reg'spec_reg_fields, 'rm");
} else if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 0) &&
(instr->Bits(11, 0) == 0)) {
@@ -1285,8 +1285,8 @@ void Decoder::DecodeType3(Instruction* instr) {
}
} else {
// PU == 0b01, BW == 0b11, Bits(9, 6) != 0b0001
- if ((instr->Bits(20, 16) == 0x1f) &&
- (instr->Bits(11, 4) == 0xf3)) {
+ if ((instr->Bits(20, 16) == 0x1F) &&
+ (instr->Bits(11, 4) == 0xF3)) {
Format(instr, "rbit'cond 'rd, 'rm");
} else {
UNREACHABLE();
@@ -1561,7 +1561,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
const char* rt_name = converter_.NameOfCPURegister(instr->RtValue());
if (instr->Bit(23) == 0) {
int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
- if ((opc1_opc2 & 0xb) == 0) {
+ if ((opc1_opc2 & 0xB) == 0) {
// NeonS32/NeonU32
if (instr->Bit(21) == 0x0) {
Format(instr, "vmov'cond.32 'Dd[0], 'rt");
@@ -1597,7 +1597,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
}
} else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x1)) {
int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
- if ((opc1_opc2 & 0xb) == 0) {
+ if ((opc1_opc2 & 0xB) == 0) {
// NeonS32 / NeonU32
if (instr->Bit(21) == 0x0) {
Format(instr, "vmov'cond.32 'rt, 'Dd[0]");
@@ -1972,7 +1972,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xa: {
+ case 0xA: {
// vpmin/vpmax.s<size> Dd, Dm, Dn.
const char* op = instr->Bit(4) == 1 ? "vpmin" : "vpmax";
out_buffer_pos_ +=
@@ -1980,14 +1980,14 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
op, size, Vd, Vn, Vm);
break;
}
- case 0xb: {
+ case 0xB: {
// vpadd.i<size> Dd, Dm, Dn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vpadd.i%d d%d, d%d, d%d",
size, Vd, Vn, Vm);
break;
}
- case 0xd: {
+ case 0xD: {
if (instr->Bit(4) == 0) {
const char* op = (instr->Bits(21, 20) == 0) ? "vadd" : "vsub";
// vadd/vsub.f32 Qd, Qm, Qn.
@@ -1998,7 +1998,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xe: {
+ case 0xE: {
if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 0) {
// vceq.f32 Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
@@ -2008,7 +2008,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xf: {
+ case 0xF: {
if (instr->Bit(20) == 0 && instr->Bit(6) == 1) {
if (instr->Bit(4) == 1) {
// vrecps/vrsqrts.f32 Qd, Qm, Qn.
@@ -2158,7 +2158,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xa: {
+ case 0xA: {
// vpmin/vpmax.u<size> Dd, Dm, Dn.
const char* op = instr->Bit(4) == 1 ? "vpmin" : "vpmax";
out_buffer_pos_ +=
@@ -2166,7 +2166,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
op, size, Vd, Vn, Vm);
break;
}
- case 0xd: {
+ case 0xD: {
if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
instr->Bit(4) == 1) {
// vmul.f32 Qd, Qm, Qn
@@ -2182,7 +2182,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xe: {
+ case 0xE: {
if (instr->Bit(20) == 0 && instr->Bit(4) == 0) {
const char* op = (instr->Bit(21) == 0) ? "vcge" : "vcgt";
// vcge/vcgt.f32 Qd, Qm, Qn.
@@ -2332,12 +2332,12 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
instr->Bit(6) == 1) {
int size = kBitsPerByte * (1 << instr->Bits(19, 18));
char type = instr->Bit(10) != 0 ? 'f' : 's';
- if (instr->Bits(9, 6) == 0xd) {
+ if (instr->Bits(9, 6) == 0xD) {
// vabs<type>.<size> Qd, Qm.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vabs.%c%d q%d, q%d",
type, size, Vd, Vm);
- } else if (instr->Bits(9, 6) == 0xf) {
+ } else if (instr->Bits(9, 6) == 0xF) {
// vneg<type>.<size> Qd, Qm.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vneg.%c%d q%d, q%d",
@@ -2423,7 +2423,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
break;
case 0xA:
case 0xB:
- if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
+ if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xF)) {
const char* rn_name = converter_.NameOfCPURegister(instr->Bits(19, 16));
int offset = instr->Bits(11, 0);
if (offset == 0) {
@@ -2601,14 +2601,6 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
"constant pool begin (length %d)",
DecodeConstantPoolLength(instruction_bits));
return Instruction::kInstrSize;
- } else if (instruction_bits == kCodeAgeJumpInstruction) {
- // The code age prologue has a constant immediately following the jump
- // instruction.
- Instruction* target = Instruction::At(instr_ptr + Instruction::kInstrSize);
- DecodeType2(instr);
- SNPrintF(out_buffer_ + out_buffer_pos_,
- " (0x%08x)", target->InstructionBits());
- return 2 * Instruction::kInstrSize;
}
switch (instr->TypeValue()) {
case 0:
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index 20ef0e37bc..6b7498fde5 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -45,8 +45,6 @@ const Register LoadDescriptor::SlotRegister() { return r0; }
const Register LoadWithVectorDescriptor::VectorRegister() { return r3; }
-const Register LoadICProtoArrayDescriptor::HandlerRegister() { return r4; }
-
const Register StoreDescriptor::ReceiverRegister() { return r1; }
const Register StoreDescriptor::NameRegister() { return r2; }
const Register StoreDescriptor::ValueRegister() { return r0; }
@@ -204,6 +202,11 @@ void TransitionElementsKindDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void AbortJSDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 8575b0336c..30190d3f34 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -224,44 +224,6 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(code.address(), rmode, cond, mode);
}
-void MacroAssembler::CallDeoptimizer(Address target) {
- BlockConstPoolScope block_const_pool(this);
-
- uintptr_t target_raw = reinterpret_cast<uintptr_t>(target);
-
- // Use ip directly instead of using UseScratchRegisterScope, as we do not
- // preserve scratch registers across calls.
-
- // We use blx, like a call, but it does not return here. The link register is
- // used by the deoptimizer to work out what called it.
- if (CpuFeatures::IsSupported(ARMv7)) {
- CpuFeatureScope scope(this, ARMv7);
- movw(ip, target_raw & 0xffff);
- movt(ip, (target_raw >> 16) & 0xffff);
- blx(ip);
- } else {
- // We need to load a literal, but we can't use the usual constant pool
- // because we call this from a patcher, and cannot afford the guard
- // instruction and other administrative overhead.
- ldr(ip, MemOperand(pc, (2 * kInstrSize) - kPcLoadDelta));
- blx(ip);
- dd(target_raw);
- }
-}
-
-int MacroAssembler::CallDeoptimizerSize() {
- // ARMv7+:
- // movw ip, ...
- // movt ip, ...
- // blx ip @ This never returns.
- //
- // ARMv6:
- // ldr ip, =address
- // blx ip @ This never returns.
- // .word address
- return 3 * kInstrSize;
-}
-
void TurboAssembler::Ret(Condition cond) { bx(lr, cond); }
void TurboAssembler::Drop(int count, Condition cond) {
@@ -608,7 +570,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
Register scratch = temps.Acquire();
ldr(scratch, MemOperand(address));
cmp(scratch, value);
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
if (remembered_set_action == OMIT_REMEMBERED_SET &&
@@ -985,7 +947,7 @@ void TurboAssembler::LslPair(Register dst_low, Register dst_high,
rsb(scratch, shift, Operand(32), SetCC);
b(gt, &less_than_32);
// If shift >= 32
- and_(scratch, shift, Operand(0x1f));
+ and_(scratch, shift, Operand(0x1F));
lsl(dst_high, src_low, Operand(scratch));
mov(dst_low, Operand(0));
jmp(&done);
@@ -1010,7 +972,7 @@ void TurboAssembler::LslPair(Register dst_low, Register dst_high,
Move(dst_high, src_low);
Move(dst_low, Operand(0));
} else if (shift >= 32) {
- shift &= 0x1f;
+ shift &= 0x1F;
lsl(dst_high, src_low, Operand(shift));
mov(dst_low, Operand(0));
} else {
@@ -1031,7 +993,7 @@ void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
rsb(scratch, shift, Operand(32), SetCC);
b(gt, &less_than_32);
// If shift >= 32
- and_(scratch, shift, Operand(0x1f));
+ and_(scratch, shift, Operand(0x1F));
lsr(dst_low, src_high, Operand(scratch));
mov(dst_high, Operand(0));
jmp(&done);
@@ -1054,7 +1016,7 @@ void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
mov(dst_low, src_high);
mov(dst_high, Operand(0));
} else if (shift > 32) {
- shift &= 0x1f;
+ shift &= 0x1F;
lsr(dst_low, src_high, Operand(shift));
mov(dst_high, Operand(0));
} else if (shift == 0) {
@@ -1078,7 +1040,7 @@ void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
rsb(scratch, shift, Operand(32), SetCC);
b(gt, &less_than_32);
// If shift >= 32
- and_(scratch, shift, Operand(0x1f));
+ and_(scratch, shift, Operand(0x1F));
asr(dst_low, src_high, Operand(scratch));
asr(dst_high, src_high, Operand(31));
jmp(&done);
@@ -1100,7 +1062,7 @@ void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
mov(dst_low, src_high);
asr(dst_high, src_high, Operand(31));
} else if (shift > 32) {
- shift &= 0x1f;
+ shift &= 0x1F;
asr(dst_low, src_high, Operand(shift));
asr(dst_high, src_high, Operand(31));
} else if (shift == 0) {
@@ -1218,7 +1180,6 @@ int TurboAssembler::ActivationFrameAlignment() {
#endif // V8_HOST_ARCH_ARM
}
-
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool argument_count_is_length) {
ConstantPoolUnavailableScope constant_pool_unavailable(this);
@@ -1244,6 +1205,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
ldr(cp, MemOperand(scratch));
#ifdef DEBUG
+ mov(r3, Operand(Context::kInvalidContext));
mov(scratch,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
str(r3, MemOperand(scratch));
@@ -1307,7 +1269,7 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
if (FLAG_debug_code) {
cmp(src_reg, dst_reg);
- Check(lo, kStackAccessBelowStackPointer);
+ Check(lo, AbortReason::kStackAccessBelowStackPointer);
}
// Restore caller's frame pointer and return address now as they will be
@@ -1539,15 +1501,15 @@ void MacroAssembler::MaybeDropFrames() {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ Push(Smi::kZero); // Padding.
// Link the current handler as the next handler.
mov(r6,
Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
ldr(r5, MemOperand(r6));
push(r5);
-
// Set this new handler as the current one.
str(sp, MemOperand(r6));
}
@@ -1560,8 +1522,8 @@ void MacroAssembler::PopStackHandler() {
pop(r1);
mov(scratch,
Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
- add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
str(r1, MemOperand(scratch));
+ add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
}
@@ -1660,9 +1622,9 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- // If result is not saturated (0x7fffffff or 0x80000000), we are done.
+ // If result is not saturated (0x7FFFFFFF or 0x80000000), we are done.
sub(scratch, result, Operand(1));
- cmp(scratch, Operand(0x7ffffffe));
+ cmp(scratch, Operand(0x7FFFFFFE));
b(lt, done);
}
@@ -1765,12 +1727,12 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
}
-void TurboAssembler::Assert(Condition cond, BailoutReason reason) {
+void TurboAssembler::Assert(Condition cond, AbortReason reason) {
if (emit_debug_code())
Check(cond, reason);
}
-void TurboAssembler::Check(Condition cond, BailoutReason reason) {
+void TurboAssembler::Check(Condition cond, AbortReason reason) {
Label L;
b(cond, &L);
Abort(reason);
@@ -1778,11 +1740,11 @@ void TurboAssembler::Check(Condition cond, BailoutReason reason) {
bind(&L);
}
-void TurboAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
+ const char* msg = GetAbortReason(reason);
if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -1873,7 +1835,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
- Check(ne, kOperandIsASmi);
+ Check(ne, AbortReason::kOperandIsASmi);
}
}
@@ -1882,7 +1844,7 @@ void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
- Check(eq, kOperandIsNotSmi);
+ Check(eq, AbortReason::kOperandIsNotASmi);
}
}
@@ -1890,11 +1852,11 @@ void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
- Check(ne, kOperandIsASmiAndNotAFixedArray);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray);
push(object);
CompareObjectType(object, object, object, FIXED_ARRAY_TYPE);
pop(object);
- Check(eq, kOperandIsNotAFixedArray);
+ Check(eq, AbortReason::kOperandIsNotAFixedArray);
}
}
@@ -1902,11 +1864,11 @@ void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
- Check(ne, kOperandIsASmiAndNotAFunction);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction);
push(object);
CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
pop(object);
- Check(eq, kOperandIsNotAFunction);
+ Check(eq, AbortReason::kOperandIsNotAFunction);
}
}
@@ -1915,18 +1877,18 @@ void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
- Check(ne, kOperandIsASmiAndNotABoundFunction);
+ Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction);
push(object);
CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
pop(object);
- Check(eq, kOperandIsNotABoundFunction);
+ Check(eq, AbortReason::kOperandIsNotABoundFunction);
}
}
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
tst(object, Operand(kSmiTagMask));
- Check(ne, kOperandIsASmiAndNotAGeneratorObject);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
// Load map
Register map = object;
@@ -1945,7 +1907,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
bind(&do_check);
// Restore generator object to register and perform assertion
pop(object);
- Check(eq, kOperandIsNotAGeneratorObject);
+ Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
@@ -1957,7 +1919,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
b(eq, &done_checking);
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
- Assert(eq, kExpectedUndefinedOrCell);
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
}
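Several hunks above (LslPair, LsrPair, AsrPair) mask the shift amount with 0x1F once it is known to be at least 32, since only the low five bits matter for the remaining single-register shift. A portable sketch of that pair-shift logic for a 64-bit value held as two 32-bit halves (illustration only, assuming 0 <= shift < 64):

#include <cassert>
#include <cstdint>

void LslPair(uint32_t src_low, uint32_t src_high, uint32_t shift,
             uint32_t* dst_low, uint32_t* dst_high) {
  if (shift == 0) {
    *dst_low = src_low;
    *dst_high = src_high;
  } else if (shift >= 32) {
    *dst_high = src_low << (shift & 0x1F);  // the whole low word moves up
    *dst_low = 0;
  } else {
    *dst_high = (src_high << shift) | (src_low >> (32 - shift));
    *dst_low = src_low << shift;
  }
}

int main() {
  uint32_t lo, hi;
  LslPair(0x80000001u, 0x0u, 33, &lo, &hi);
  assert(lo == 0 && hi == 0x00000002u);
  return 0;
}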
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 2f97869621..cf731cbedb 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -294,13 +294,13 @@ class TurboAssembler : public Assembler {
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cond, BailoutReason reason);
+ void Assert(Condition cond, AbortReason reason);
// Like Assert(), but always enabled.
- void Check(Condition cond, BailoutReason reason);
+ void Check(Condition cond, AbortReason reason);
// Print a message to stdout and abort execution.
- void Abort(BailoutReason msg);
+ void Abort(AbortReason msg);
inline bool AllowThisStubCall(CodeStub* stub);
@@ -579,10 +579,6 @@ class MacroAssembler : public TurboAssembler {
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
- // Used for patching in calls to the deoptimizer.
- void CallDeoptimizer(Address target);
- static int CallDeoptimizerSize();
-
// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg,
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 8ab6cb6b5c..52fe902237 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -259,11 +259,9 @@ void ArmDebugger::Debug() {
for (int i = 0; i < DwVfpRegister::NumRegisters(); i++) {
dvalue = GetVFPDoubleRegisterValue(i);
uint64_t as_words = bit_cast<uint64_t>(dvalue);
- PrintF("%3s: %f 0x%08x %08x\n",
- VFPRegisters::Name(i, true),
- dvalue,
- static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xffffffff));
+ PrintF("%3s: %f 0x%08x %08x\n", VFPRegisters::Name(i, true),
+ dvalue, static_cast<uint32_t>(as_words >> 32),
+ static_cast<uint32_t>(as_words & 0xFFFFFFFF));
}
} else {
if (GetValue(arg1, &value)) {
@@ -273,11 +271,9 @@ void ArmDebugger::Debug() {
PrintF("%s: %f 0x%08x\n", arg1, svalue, as_word);
} else if (GetVFPDoubleValue(arg1, &dvalue)) {
uint64_t as_words = bit_cast<uint64_t>(dvalue);
- PrintF("%s: %f 0x%08x %08x\n",
- arg1,
- dvalue,
+ PrintF("%s: %f 0x%08x %08x\n", arg1, dvalue,
static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xffffffff));
+ static_cast<uint32_t>(as_words & 0xFFFFFFFF));
} else {
PrintF("%s unrecognized\n", arg1);
}
@@ -575,6 +571,10 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
+void Simulator::SetRedirectInstruction(Instruction* instruction) {
+ instruction->SetInstructionBits(al | (0xF * B24) | kCallRtRedirected);
+}
+
void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
void* start_addr, size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
@@ -644,21 +644,12 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
}
-void Simulator::Initialize(Isolate* isolate) {
- if (isolate->simulator_initialized()) return;
- isolate->set_simulator_initialized(true);
- ::v8::internal::ExternalReference::set_redirector(isolate,
- &RedirectExternalReference);
-}
-
-
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == nullptr) {
i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
- Initialize(isolate);
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
@@ -715,100 +706,6 @@ Simulator::~Simulator() {
free(stack_);
}
-// When the generated code calls an external reference we need to catch that in
-// the simulator. The external reference will be a function compiled for the
-// host architecture. We need to call that function instead of trying to
-// execute it with the simulator. We do that by redirecting the external
-// reference to a svc (Supervisor Call) instruction that is handled by
-// the simulator. We write the original destination of the jump just at a known
-// offset from the svc instruction so the simulator knows what to call.
-class Redirection {
- public:
- Redirection(Isolate* isolate, void* external_function,
- ExternalReference::Type type)
- : external_function_(external_function),
- swi_instruction_(al | (0xf * B24) | kCallRtRedirected),
- type_(type),
- next_(nullptr) {
- next_ = isolate->simulator_redirection();
- Simulator::current(isolate)->
- FlushICache(isolate->simulator_i_cache(),
- reinterpret_cast<void*>(&swi_instruction_),
- Instruction::kInstrSize);
- isolate->set_simulator_redirection(this);
- }
-
- void* address_of_swi_instruction() {
- return reinterpret_cast<void*>(&swi_instruction_);
- }
-
- void* external_function() { return external_function_; }
- ExternalReference::Type type() { return type_; }
-
- static Redirection* Get(Isolate* isolate, void* external_function,
- ExternalReference::Type type) {
- Redirection* current = isolate->simulator_redirection();
- for (; current != nullptr; current = current->next_) {
- if (current->external_function_ == external_function &&
- current->type_ == type) {
- return current;
- }
- }
- return new Redirection(isolate, external_function, type);
- }
-
- static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
- char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
- char* addr_of_redirection =
- addr_of_swi - offsetof(Redirection, swi_instruction_);
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- static void* ReverseRedirection(int32_t reg) {
- Redirection* redirection = FromSwiInstruction(
- reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
- return redirection->external_function();
- }
-
- static void DeleteChain(Redirection* redirection) {
- while (redirection != nullptr) {
- Redirection* next = redirection->next_;
- delete redirection;
- redirection = next;
- }
- }
-
- private:
- void* external_function_;
- uint32_t swi_instruction_;
- ExternalReference::Type type_;
- Redirection* next_;
-};
-
-
-// static
-void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
- Redirection* first) {
- Redirection::DeleteChain(first);
- if (i_cache != nullptr) {
- for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
- entry = i_cache->Next(entry)) {
- delete static_cast<CachePage*>(entry->value);
- }
- delete i_cache;
- }
-}
-
-
-void* Simulator::RedirectExternalReference(Isolate* isolate,
- void* external_function,
- ExternalReference::Type type) {
- base::LockGuard<base::Mutex> lock_guard(
- isolate->simulator_redirection_mutex());
- Redirection* redirection = Redirection::Get(isolate, external_function, type);
- return redirection->address_of_swi_instruction();
-}
-
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
@@ -1035,9 +932,9 @@ void Simulator::SetFpResult(const double& result) {
void Simulator::TrashCallerSaveRegisters() {
// We don't trash the registers with the return value.
- registers_[2] = 0x50Bad4U;
- registers_[3] = 0x50Bad4U;
- registers_[12] = 0x50Bad4U;
+ registers_[2] = 0x50BAD4U;
+ registers_[3] = 0x50BAD4U;
+ registers_[12] = 0x50BAD4U;
}
@@ -1292,7 +1189,7 @@ void Simulator::SetVFlag(bool val) {
bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
uint32_t uleft = static_cast<uint32_t>(left);
uint32_t uright = static_cast<uint32_t>(right);
- uint32_t urest = 0xffffffffU - uleft;
+ uint32_t urest = 0xFFFFFFFFU - uleft;
return (uright > urest) ||
(carry && (((uright + 1) > urest) || (uright > (urest - 1))));
@@ -1409,7 +1306,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
case ASR: {
if (shift_amount == 0) {
if (result < 0) {
- result = 0xffffffff;
+ result = 0xFFFFFFFF;
*carry_out = true;
} else {
result = 0;
@@ -1468,7 +1365,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
} else {
// by register
int rs = instr->RsValue();
- shift_amount = get_register(rs) &0xff;
+ shift_amount = get_register(rs) & 0xFF;
switch (shift) {
case ASR: {
if (shift_amount == 0) {
@@ -1481,7 +1378,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
DCHECK_GE(shift_amount, 32);
if (result < 0) {
*carry_out = true;
- result = 0xffffffff;
+ result = 0xFFFFFFFF;
} else {
*carry_out = false;
result = 0;
@@ -1739,7 +1636,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
bool stack_aligned =
(get_register(sp)
& (::v8::internal::FLAG_sim_stack_alignment - 1)) == 0;
- Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ Redirection* redirection = Redirection::FromInstruction(instr);
int32_t arg0 = get_register(r0);
int32_t arg1 = get_register(r1);
int32_t arg2 = get_register(r2);
@@ -1982,7 +1879,7 @@ Float32 Simulator::canonicalizeNaN(Float32 value) {
double Simulator::canonicalizeNaN(double value) {
// Default NaN value, see "NaN handling" in "IEEE 754 standard implementation
// choices" of the ARM Reference Manual.
- constexpr uint64_t kDefaultNaN = V8_UINT64_C(0x7FF8000000000000);
+ constexpr uint64_t kDefaultNaN = uint64_t{0x7FF8000000000000};
if (FPSCR_default_NaN_mode_ && std::isnan(value)) {
value = bit_cast<double>(kDefaultNaN);
}
@@ -1993,7 +1890,7 @@ Float64 Simulator::canonicalizeNaN(Float64 value) {
// Default NaN value, see "NaN handling" in "IEEE 754 standard implementation
// choices" of the ARM Reference Manual.
constexpr Float64 kDefaultNaN =
- Float64::FromBits(V8_UINT64_C(0x7FF8000000000000));
+ Float64::FromBits(uint64_t{0x7FF8000000000000});
return FPSCR_default_NaN_mode_ && value.is_nan() ? kDefaultNaN : value;
}
@@ -2036,7 +1933,7 @@ void Simulator::DisableStop(uint32_t code) {
void Simulator::IncreaseStopCounter(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
DCHECK(isWatchedStop(code));
- if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
PrintF("Stop counter for code %i has overflowed.\n"
"Enabling this code and reseting the counter to 0.\n", code);
watched_stops_[code].count = 0;
@@ -2137,14 +2034,14 @@ void Simulator::DecodeType01(Instruction* instr) {
int64_t right_op = static_cast<int32_t>(rs_val);
uint64_t result = left_op * right_op;
hi_res = static_cast<int32_t>(result >> 32);
- lo_res = static_cast<int32_t>(result & 0xffffffff);
+ lo_res = static_cast<int32_t>(result & 0xFFFFFFFF);
} else {
// unsigned multiply
uint64_t left_op = static_cast<uint32_t>(rm_val);
uint64_t right_op = static_cast<uint32_t>(rs_val);
uint64_t result = left_op * right_op;
hi_res = static_cast<int32_t>(result >> 32);
- lo_res = static_cast<int32_t>(result & 0xffffffff);
+ lo_res = static_cast<int32_t>(result & 0xFFFFFFFF);
}
set_register(rd_lo, lo_res);
set_register(rd_hi, hi_res);
@@ -2316,7 +2213,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
}
}
- if (((instr->Bits(7, 4) & 0xd) == 0xd) && (instr->Bit(20) == 0)) {
+ if (((instr->Bits(7, 4) & 0xD) == 0xD) && (instr->Bit(20) == 0)) {
DCHECK_EQ(rd % 2, 0);
if (instr->HasH()) {
// The strd instruction.
@@ -2357,7 +2254,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
} else if ((type == 0) && instr->IsMiscType0()) {
if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 2) &&
- (instr->Bits(15, 4) == 0xf00)) {
+ (instr->Bits(15, 4) == 0xF00)) {
// MSR
int rm = instr->RmValue();
DCHECK_NE(pc, rm); // UNPREDICTABLE
@@ -2569,8 +2466,8 @@ void Simulator::DecodeType01(Instruction* instr) {
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
} else {
// Format(instr, "movt'cond 'rd, 'imm").
- alu_out = (get_register(rd) & 0xffff) |
- (instr->ImmedMovwMovtValue() << 16);
+ alu_out =
+ (get_register(rd) & 0xFFFF) | (instr->ImmedMovwMovtValue() << 16);
set_register(rd, alu_out);
}
break;
@@ -2987,8 +2884,8 @@ void Simulator::DecodeType3(Instruction* instr) {
}
} else {
// PU == 0b01, BW == 0b11, Bits(9, 6) != 0b0001
- if ((instr->Bits(20, 16) == 0x1f) &&
- (instr->Bits(11, 4) == 0xf3)) {
+ if ((instr->Bits(20, 16) == 0x1F) &&
+ (instr->Bits(11, 4) == 0xF3)) {
// Rbit.
uint32_t rm_val = get_register(instr->RmValue());
set_register(rd, base::bits::ReverseBits(rm_val));
@@ -3084,7 +2981,7 @@ void Simulator::DecodeType3(Instruction* instr) {
uint32_t rd_val =
static_cast<uint32_t>(get_register(instr->RdValue()));
uint32_t bitcount = msbit - lsbit + 1;
- uint32_t mask = 0xffffffffu >> (32 - bitcount);
+ uint32_t mask = 0xFFFFFFFFu >> (32 - bitcount);
rd_val &= ~(mask << lsbit);
if (instr->RmValue() != 15) {
// bfi - bitfield insert.
@@ -3422,7 +3319,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
int vd = instr->VFPNRegValue(kDoublePrecision);
int rt = instr->RtValue();
int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
- if ((opc1_opc2 & 0xb) == 0) {
+ if ((opc1_opc2 & 0xB) == 0) {
// NeonS32/NeonU32
uint32_t data[2];
get_d_register(vd, data);
@@ -3500,7 +3397,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
uint64_t data;
get_d_register(vn, &data);
- if ((opc1_opc2 & 0xb) == 0) {
+ if ((opc1_opc2 & 0xB) == 0) {
// NeonS32 / NeonU32
int32_t int_data[2];
memcpy(int_data, &data, sizeof(int_data));
@@ -3514,14 +3411,14 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
int i = opc1_opc2 & 0x7;
int shift = i * kBitsPerByte;
uint32_t scalar = (data >> shift) & 0xFFu;
- if (!u && (scalar & 0x80) != 0) scalar |= 0xffffff00;
+ if (!u && (scalar & 0x80) != 0) scalar |= 0xFFFFFF00;
set_register(rt, scalar);
} else if ((opc1_opc2 & 0x1) != 0) {
// NeonS16 / NeonU16
int i = (opc1_opc2 >> 1) & 0x3;
int shift = i * kBitsPerByte * kShortSize;
uint32_t scalar = (data >> shift) & 0xFFFFu;
- if (!u && (scalar & 0x8000) != 0) scalar |= 0xffff0000;
+ if (!u && (scalar & 0x8000) != 0) scalar |= 0xFFFF0000;
set_register(rt, scalar);
} else {
UNREACHABLE(); // Not used by V8.
@@ -3702,7 +3599,7 @@ bool get_inv_op_vfp_flag(VFPRoundingMode mode,
double val,
bool unsigned_) {
DCHECK((mode == RN) || (mode == RM) || (mode == RZ));
- double max_uint = static_cast<double>(0xffffffffu);
+ double max_uint = static_cast<double>(0xFFFFFFFFu);
double max_int = static_cast<double>(kMaxInt);
double min_int = static_cast<double>(kMinInt);
@@ -3744,7 +3641,7 @@ int VFPConversionSaturate(double val, bool unsigned_res) {
return 0;
} else {
if (unsigned_res) {
- return (val < 0) ? 0 : 0xffffffffu;
+ return (val < 0) ? 0 : 0xFFFFFFFFu;
} else {
return (val < 0) ? kMinInt : kMaxInt;
}
@@ -4496,7 +4393,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xa: {
+ case 0xA: {
// vpmin/vpmax.s<size> Dd, Dm, Dn.
NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
bool min = instr->Bit(4) != 0;
@@ -4516,7 +4413,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xb: {
+ case 0xB: {
// vpadd.i<size> Dd, Dm, Dn.
NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
switch (size) {
@@ -4535,7 +4432,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xd: {
+ case 0xD: {
if (instr->Bit(4) == 0) {
float src1[4], src2[4];
get_neon_register(Vn, src1);
@@ -4555,7 +4452,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xe: {
+ case 0xE: {
if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 0) {
// vceq.f32.
float src1[4], src2[4];
@@ -4571,7 +4468,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xf: {
+ case 0xF: {
if (instr->Bit(20) == 0 && instr->Bit(6) == 1) {
float src1[4], src2[4];
get_neon_register(Vn, src1);
@@ -4862,7 +4759,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xa: {
+ case 0xA: {
// vpmin/vpmax.u<size> Dd, Dm, Dn.
NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
bool min = instr->Bit(4) != 0;
@@ -4882,7 +4779,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xd: {
+ case 0xD: {
if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
instr->Bit(4) == 1) {
// vmul.f32 Qd, Qn, Qm
@@ -4902,7 +4799,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xe: {
+ case 0xE: {
if (instr->Bit(20) == 0 && instr->Bit(4) == 0) {
// vcge/vcgt.f32 Qd, Qm, Qn
bool ge = instr->Bit(21) == 0;
@@ -5014,15 +4911,15 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
if ((imm4 & 0x1) != 0) {
size = 8;
index = imm4 >> 1;
- mask = 0xffu;
+ mask = 0xFFu;
} else if ((imm4 & 0x2) != 0) {
size = 16;
index = imm4 >> 2;
- mask = 0xffffu;
+ mask = 0xFFFFu;
} else {
size = 32;
index = imm4 >> 3;
- mask = 0xffffffffu;
+ mask = 0xFFFFFFFFu;
}
uint64_t d_data;
get_d_register(vm, &d_data);
@@ -5275,7 +5172,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
- if (instr->Bits(9, 6) == 0xd) {
+ if (instr->Bits(9, 6) == 0xD) {
// vabs<type>.<size> Qd, Qm
if (instr->Bit(10) != 0) {
// floating point (clear sign bits)
@@ -5302,7 +5199,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
break;
}
}
- } else if (instr->Bits(9, 6) == 0xf) {
+ } else if (instr->Bits(9, 6) == 0xF) {
// vneg<type>.<size> Qd, Qm (signed integer)
if (instr->Bit(10) != 0) {
// floating point (toggle sign bits)
@@ -5561,7 +5458,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
break;
case 0xA:
case 0xB:
- if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
+ if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xF)) {
// pld: ignore instruction.
} else if (instr->SpecialValue() == 0xA && instr->Bits(22, 20) == 7) {
// dsb, dmb, isb: ignore instruction for now.
@@ -5893,18 +5790,16 @@ void Simulator::CallInternal(byte* entry) {
set_register(r11, r11_val);
}
-
-int32_t Simulator::Call(byte* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
+intptr_t Simulator::CallImpl(byte* entry, int argument_count,
+ const intptr_t* arguments) {
// Set up arguments
// First four arguments passed in registers.
- DCHECK_GE(argument_count, 4);
- set_register(r0, va_arg(parameters, int32_t));
- set_register(r1, va_arg(parameters, int32_t));
- set_register(r2, va_arg(parameters, int32_t));
- set_register(r3, va_arg(parameters, int32_t));
+ int reg_arg_count = std::min(4, argument_count);
+ if (reg_arg_count > 0) set_register(r0, arguments[0]);
+ if (reg_arg_count > 1) set_register(r1, arguments[1]);
+ if (reg_arg_count > 2) set_register(r2, arguments[2]);
+ if (reg_arg_count > 3) set_register(r3, arguments[3]);
// Remaining arguments passed on stack.
int original_stack = get_register(sp);
@@ -5914,11 +5809,8 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
entry_stack &= -base::OS::ActivationFrameAlignment();
}
// Store remaining arguments on stack, from low to high memory.
- intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = 4; i < argument_count; i++) {
- stack_argument[i - 4] = va_arg(parameters, int32_t);
- }
- va_end(parameters);
+ memcpy(reinterpret_cast<intptr_t*>(entry_stack), arguments + reg_arg_count,
+ (argument_count - reg_arg_count) * sizeof(*arguments));
set_register(sp, entry_stack);
CallInternal(entry);
@@ -5927,12 +5819,10 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
CHECK_EQ(entry_stack, get_register(sp));
set_register(sp, original_stack);
- int32_t result = get_register(r0);
- return result;
+ return get_register(r0);
}
-
-void Simulator::CallFP(byte* entry, double d0, double d1) {
+int32_t Simulator::CallFPImpl(byte* entry, double d0, double d1) {
if (use_eabi_hardfloat()) {
set_d_register_from_double(0, d0);
set_d_register_from_double(1, d1);
@@ -5941,13 +5831,7 @@ void Simulator::CallFP(byte* entry, double d0, double d1) {
set_register_pair_from_double(2, &d1);
}
CallInternal(entry);
-}
-
-
-int32_t Simulator::CallFPReturnsInt(byte* entry, double d0, double d1) {
- CallFP(entry, d0, d1);
- int32_t result = get_register(r0);
- return result;
+ return get_register(r0);
}
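The simulator's CarryFrom(), touched in one of the hunks above, detects unsigned 32-bit overflow by comparing the right operand against 0xFFFFFFFF minus the left operand, i.e. the largest value that can still be added without wrapping. A small standalone sketch of that check, cross-validated against a 64-bit reference (illustrative; the carry-in case handled by the simulator is omitted here):

#include <cassert>
#include <cstdint>

bool CarryFrom(uint32_t left, uint32_t right) {
  uint32_t rest = 0xFFFFFFFFu - left;  // headroom before the addition wraps
  return right > rest;
}

bool CarryFrom64(uint32_t left, uint32_t right) {
  return ((uint64_t{left} + right) >> 32) != 0;  // reference implementation
}

int main() {
  assert(CarryFrom(0xFFFFFFFFu, 1u) == CarryFrom64(0xFFFFFFFFu, 1u));  // true
  assert(CarryFrom(0x7FFFFFFFu, 1u) == CarryFrom64(0x7FFFFFFFu, 1u));  // false
  assert(CarryFrom(0x80000000u, 0x80000000u) ==
         CarryFrom64(0x80000000u, 0x80000000u));                       // true
  return 0;
}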
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 26889018b5..1cb11ffd96 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
// Declares a Simulator for ARM instructions if we are not generating a native
// ARM binary. This Simulator allows us to run and debug ARM code generation on
// regular desktop machines.
-// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// V8 calls into generated code by using the GeneratedCode class,
// which will start execution in the Simulator or forwards to the real entry
// on an ARM HW platform.
@@ -18,56 +17,13 @@
#include "src/base/platform/mutex.h"
#include "src/boxed-float.h"
-#if !defined(USE_SIMULATOR)
-// Running without a simulator on a native arm platform.
-
-namespace v8 {
-namespace internal {
-
-// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*, int*,
- int, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type arm_regexp_matcher.
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- (FUNCTION_CAST<arm_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on arm uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- USE(isolate);
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
- USE(isolate);
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
- USE(isolate);
- }
-};
-
-} // namespace internal
-} // namespace v8
-
-#else // !defined(USE_SIMULATOR)
+#if defined(USE_SIMULATOR)
// Running with a simulator.
#include "src/arm/constants-arm.h"
#include "src/assembler.h"
#include "src/base/hashmap.h"
+#include "src/simulator-base.h"
namespace v8 {
namespace internal {
@@ -102,8 +58,7 @@ class CachePage {
char validity_map_[kValidityMapSize]; // One byte per line.
};
-
-class Simulator {
+class Simulator : public SimulatorBase {
public:
friend class ArmDebugger;
enum Register {
@@ -134,7 +89,7 @@ class Simulator {
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
- static Simulator* current(v8::internal::Isolate* isolate);
+ V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);
// Accessors for register state. Reading the pc value adheres to the ARM
// architecture specification and is off by 8 from the currently executing
@@ -203,18 +158,16 @@ class Simulator {
// Executes ARM instructions until the PC reaches end_sim_pc.
void Execute();
- // Call on program start.
- static void Initialize(Isolate* isolate);
-
- static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
+ template <typename Return, typename... Args>
+ Return Call(byte* entry, Args... args) {
+ return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
+ }
- // V8 generally calls into generated JS code with 5 parameters and into
- // generated RegExp code with 7 parameters. This is a convenience function,
- // which sets up the simulator state and grabs the result on return.
- int32_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
- void CallFP(byte* entry, double d0, double d1);
- int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
+ template <typename Return>
+ Return CallFP(byte* entry, double d0, double d1) {
+ return ConvertReturn<Return>(CallFPImpl(entry, d0, d1));
+ }
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@@ -226,6 +179,9 @@ class Simulator {
void set_last_debugger_input(char* input);
char* last_debugger_input() { return last_debugger_input_; }
+ // Redirection support.
+ static void SetRedirectInstruction(Instruction* instruction);
+
// ICache checking.
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -255,6 +211,10 @@ class Simulator {
end_sim_pc = -2
};
+ V8_EXPORT_PRIVATE intptr_t CallImpl(byte* entry, int argument_count,
+ const intptr_t* arguments);
+ intptr_t CallFPImpl(byte* entry, double d0, double d1);
+
// Unsupported instructions use Format to print an error and stop execution.
void Format(Instruction* instr, const char* format);
@@ -369,11 +329,6 @@ class Simulator {
static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
void* page);
- // Runtime call support. Uses the isolate in a thread-safe way.
- static void* RedirectExternalReference(
- Isolate* isolate, void* external_function,
- v8::internal::ExternalReference::Type type);
-
// Handle arguments and return value for runtime FP functions.
void GetFpArgs(double* x, double* y, int32_t* z);
void SetFpResult(const double& result);
@@ -541,45 +496,8 @@ class Simulator {
static base::LazyInstance<GlobalMonitor>::type global_monitor_;
};
-
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
- FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-
-#define CALL_GENERATED_FP_INT(isolate, entry, p0, p1) \
- Simulator::current(isolate)->CallFPReturnsInt(FUNCTION_ADDR(entry), p0, p1)
-
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- Simulator::current(isolate)->Call(entry, 9, p0, p1, p2, p3, p4, p5, p6, p7, \
- p8)
-
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. The JS-based limit normally points near the end of
-// the simulator stack. When the C-based limit is exhausted we reflect that by
-// lowering the JS-based limit as well, to make stack checks trigger.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit(c_limit);
- }
-
- static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(isolate);
- return sim->PushAddress(try_catch_address);
- }
-
- static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
- Simulator::current(isolate)->PopAddress();
- }
-};
-
} // namespace internal
} // namespace v8
-#endif // !defined(USE_SIMULATOR)
+#endif // defined(USE_SIMULATOR)
#endif // V8_ARM_SIMULATOR_ARM_H_
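The header change above replaces the old varargs Call(entry, argument_count, ...) with a template that forwards typed arguments to a single CallImpl. A minimal self-contained sketch of that forwarding pattern, using a hypothetical TinySim class rather than the actual SimulatorBase helpers:

#include <cstdint>
#include <iostream>

class TinySim {
 public:
  // Converts each argument to a machine word and hands the packed array to
  // the one non-template implementation, mirroring the VariadicCall idea.
  template <typename Return, typename... Args>
  Return Call(Args... args) {
    intptr_t packed[] = {static_cast<intptr_t>(args)...};
    return static_cast<Return>(CallImpl(sizeof...(args), packed));
  }

 private:
  intptr_t CallImpl(int argc, const intptr_t* argv) {
    // A real simulator would place the first four words in r0-r3 and the rest
    // on the stack, then execute the callee; summing keeps the sketch runnable.
    intptr_t result = 0;
    for (int i = 0; i < argc; ++i) result += argv[i];
    return result;
  }
};

int main() {
  TinySim sim;
  std::cout << sim.Call<int>(1, 2, 3) << "\n";  // prints 6
  return 0;
}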
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 70d50eb330..11c4bbf33f 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -532,12 +532,6 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
}
-Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- return target_address_at(pc, constant_pool);
-}
-
-
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
@@ -615,14 +609,6 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
}
-void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
-}
-
-
int RelocInfo::target_address_size() {
return kPointerSize;
}
@@ -630,7 +616,7 @@ int RelocInfo::target_address_size() {
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
@@ -647,21 +633,21 @@ Address RelocInfo::constant_pool_entry_address() {
HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(
- reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
+ return HeapObject::cast(reinterpret_cast<Object*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<HeapObject>(
- reinterpret_cast<HeapObject**>(Assembler::target_address_at(pc_, host_)));
+ return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
+ Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -674,7 +660,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -711,7 +697,7 @@ void RelocInfo::WipeOut(Isolate* isolate) {
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = nullptr;
} else {
- Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
}
}
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index 2093a89df6..a031884e1f 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -147,9 +147,6 @@ CPURegList CPURegList::GetSafepointSavedRegisters() {
// is a caller-saved register according to the procedure call standard.
list.Combine(18);
- // Drop jssp as the stack pointer doesn't need to be included.
- list.Remove(28);
-
// Add the link register (x30) to the safepoint list.
list.Combine(30);
@@ -186,7 +183,8 @@ uint32_t RelocInfo::embedded_size() const {
void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
+ flush_mode);
}
void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
@@ -2636,7 +2634,7 @@ Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) {
} else {
// The immediate post index addressing mode is indicated by rm = 31.
// The immediate is implied by the number of vector registers used.
- addr_field |= (0x1f << Rm_offset);
+ addr_field |= (0x1F << Rm_offset);
}
} else {
DCHECK(addr.IsImmediateOffset() && (addr.offset() == 0));
@@ -3003,7 +3001,7 @@ void Assembler::fmov(const VRegister& vd, double imm) {
} else {
DCHECK(vd.Is2D());
Instr op = NEONModifiedImmediate_MOVI | NEONModifiedImmediateOpBit;
- Emit(NEON_Q | op | ImmNEONFP(imm) | NEONCmode(0xf) | Rd(vd));
+ Emit(NEON_Q | op | ImmNEONFP(imm) | NEONCmode(0xF) | Rd(vd));
}
}
@@ -3015,7 +3013,7 @@ void Assembler::fmov(const VRegister& vd, float imm) {
DCHECK(vd.Is2S() | vd.Is4S());
Instr op = NEONModifiedImmediate_MOVI;
Instr q = vd.Is4S() ? NEON_Q : 0;
- Emit(q | op | ImmNEONFP(imm) | NEONCmode(0xf) | Rd(vd));
+ Emit(q | op | ImmNEONFP(imm) | NEONCmode(0xF) | Rd(vd));
}
}
@@ -3596,15 +3594,15 @@ void Assembler::movi(const VRegister& vd, const uint64_t imm, Shift shift,
DCHECK_EQ(shift_amount, 0);
int imm8 = 0;
for (int i = 0; i < 8; ++i) {
- int byte = (imm >> (i * 8)) & 0xff;
- DCHECK((byte == 0) || (byte == 0xff));
- if (byte == 0xff) {
+ int byte = (imm >> (i * 8)) & 0xFF;
+ DCHECK((byte == 0) || (byte == 0xFF));
+ if (byte == 0xFF) {
imm8 |= (1 << i);
}
}
Instr q = vd.Is2D() ? NEON_Q : 0;
Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI |
- ImmNEONabcdefgh(imm8) | NEONCmode(0xe) | Rd(vd));
+ ImmNEONabcdefgh(imm8) | NEONCmode(0xE) | Rd(vd));
} else if (shift == LSL) {
NEONModifiedImmShiftLsl(vd, static_cast<int>(imm), shift_amount,
NEONModifiedImmediate_MOVI);
@@ -3953,7 +3951,7 @@ uint32_t Assembler::FPToImm8(double imm) {
// bit6: 0b00.0000
uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
// bit5_to_0: 00cd.efgh
- uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
+ uint64_t bit5_to_0 = (bits >> 48) & 0x3F;
return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
}
@@ -3971,7 +3969,7 @@ void Assembler::MoveWide(const Register& rd, uint64_t imm, int shift,
// Check that the top 32 bits are zero (a positive 32-bit number) or top
// 33 bits are one (a negative 32-bit number, sign extended to 64 bits).
DCHECK(((imm >> kWRegSizeInBits) == 0) ||
- ((imm >> (kWRegSizeInBits - 1)) == 0x1ffffffff));
+ ((imm >> (kWRegSizeInBits - 1)) == 0x1FFFFFFFF));
imm &= kWRegMask;
}
@@ -3984,16 +3982,16 @@ void Assembler::MoveWide(const Register& rd, uint64_t imm, int shift,
// Calculate a new immediate and shift combination to encode the immediate
// argument.
shift = 0;
- if ((imm & ~0xffffUL) == 0) {
+ if ((imm & ~0xFFFFUL) == 0) {
// Nothing to do.
- } else if ((imm & ~(0xffffUL << 16)) == 0) {
+ } else if ((imm & ~(0xFFFFUL << 16)) == 0) {
imm >>= 16;
shift = 1;
- } else if ((imm & ~(0xffffUL << 32)) == 0) {
+ } else if ((imm & ~(0xFFFFUL << 32)) == 0) {
DCHECK(rd.Is64Bits());
imm >>= 32;
shift = 2;
- } else if ((imm & ~(0xffffUL << 48)) == 0) {
+ } else if ((imm & ~(0xFFFFUL << 48)) == 0) {
DCHECK(rd.Is64Bits());
imm >>= 48;
shift = 3;
@@ -4247,7 +4245,7 @@ void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd, const int imm8,
DCHECK(is_uint8(imm8));
int cmode_0 = (shift_amount >> 4) & 1;
- int cmode = 0xc | cmode_0;
+ int cmode = 0xC | cmode_0;
Instr q = vd.IsQ() ? NEON_Q : 0;
@@ -4343,7 +4341,7 @@ void Assembler::DataProcExtendedRegister(const Register& rd,
bool Assembler::IsImmAddSub(int64_t immediate) {
return is_uint12(immediate) ||
- (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
+ (is_uint12(immediate >> 12) && ((immediate & 0xFFF) == 0));
}
void Assembler::LoadStore(const CPURegister& rt,
@@ -4526,7 +4524,7 @@ bool Assembler::IsImmLogical(uint64_t value,
clz_a = CountLeadingZeros(a, kXRegSizeInBits);
int clz_c = CountLeadingZeros(c, kXRegSizeInBits);
d = clz_a - clz_c;
- mask = ((V8_UINT64_C(1) << d) - 1);
+ mask = ((uint64_t{1} << d) - 1);
out_n = 0;
} else {
// Handle degenerate cases.
@@ -4547,7 +4545,7 @@ bool Assembler::IsImmLogical(uint64_t value,
// the general case above, and set the N bit in the output.
clz_a = CountLeadingZeros(a, kXRegSizeInBits);
d = 64;
- mask = ~V8_UINT64_C(0);
+ mask = ~uint64_t{0};
out_n = 1;
}
}
@@ -4596,7 +4594,7 @@ bool Assembler::IsImmLogical(uint64_t value,
// Count the set bits in our basic stretch. The special case of clz(0) == -1
// makes the answer come out right for stretches that reach the very top of
- // the word (e.g. numbers like 0xffffc00000000000).
+ // the word (e.g. numbers like 0xFFFFC00000000000).
int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSizeInBits);
int s = clz_a - clz_b;
@@ -4628,7 +4626,7 @@ bool Assembler::IsImmLogical(uint64_t value,
//
// So we 'or' (-d << 1) with our computed s to form imms.
*n = out_n;
- *imm_s = ((-d << 1) | (s - 1)) & 0x3f;
+ *imm_s = ((-d << 1) | (s - 1)) & 0x3F;
*imm_r = r;
return true;
@@ -4645,13 +4643,13 @@ bool Assembler::IsImmFP32(float imm) {
// aBbb.bbbc.defg.h000.0000.0000.0000.0000
uint32_t bits = bit_cast<uint32_t>(imm);
// bits[19..0] are cleared.
- if ((bits & 0x7ffff) != 0) {
+ if ((bits & 0x7FFFF) != 0) {
return false;
}
// bits[29..25] are all set or all cleared.
- uint32_t b_pattern = (bits >> 16) & 0x3e00;
- if (b_pattern != 0 && b_pattern != 0x3e00) {
+ uint32_t b_pattern = (bits >> 16) & 0x3E00;
+ if (b_pattern != 0 && b_pattern != 0x3E00) {
return false;
}
@@ -4670,13 +4668,13 @@ bool Assembler::IsImmFP64(double imm) {
// 0000.0000.0000.0000.0000.0000.0000.0000
uint64_t bits = bit_cast<uint64_t>(imm);
// bits[47..0] are cleared.
- if ((bits & 0xffffffffffffL) != 0) {
+ if ((bits & 0xFFFFFFFFFFFFL) != 0) {
return false;
}
// bits[61..54] are all set or all cleared.
- uint32_t b_pattern = (bits >> 48) & 0x3fc0;
- if (b_pattern != 0 && b_pattern != 0x3fc0) {
+ uint32_t b_pattern = (bits >> 48) & 0x3FC0;
+ if (b_pattern != 0 && b_pattern != 0x3FC0) {
return false;
}
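Aside from the hex-literal capitalization, the two predicates above are the ARM64 FMOV-immediate test: a double fits the 8-bit immediate form only if its low 48 bits are zero, its 'b' exponent bits (bits 61..54) are uniform, and (outside the context shown in this hunk) bit 62 is the inverse of bit 61. A standalone sketch of that test follows, assuming those are the only three conditions; FitsFpImm8 is a made-up helper name, not V8 API.

#include <cstdint>
#include <cstring>

// Sketch only: does `imm` fit the aBbb.bbbb.bbcd.efgh.0000...0 pattern that
// the ARM64 FMOV (immediate) encoding can represent?
bool FitsFpImm8(double imm) {
  uint64_t bits;
  std::memcpy(&bits, &imm, sizeof(bits));                   // bit_cast
  if ((bits & 0xFFFFFFFFFFFFULL) != 0) return false;        // bits[47..0] clear
  uint64_t b_pattern = (bits >> 48) & 0x3FC0;               // bits[61..54]
  if (b_pattern != 0 && b_pattern != 0x3FC0) return false;  // all set or all clear
  // bits[62] and bits[61] must differ ('B' is the inverse of 'b').
  return ((bits ^ (bits << 1)) & 0x4000000000000000ULL) != 0;
}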
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index bfdab599a3..2deae8aaa4 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -39,7 +39,8 @@ namespace internal {
#define ALLOCATABLE_GENERAL_REGISTERS(R) \
R(x0) R(x1) R(x2) R(x3) R(x4) R(x5) R(x6) R(x7) \
R(x8) R(x9) R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
- R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x27)
+ R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x27) \
+ R(x28)
#define FLOAT_REGISTERS(V) \
V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) \
@@ -295,6 +296,7 @@ class Register : public CPURegister {
static_assert(IS_TRIVIALLY_COPYABLE(Register),
"Register can efficiently be passed by value");
+constexpr bool kPadArguments = true;
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
@@ -479,13 +481,6 @@ ALIAS_REGISTER(Register, root, x26);
ALIAS_REGISTER(Register, rr, x26);
// Context pointer register.
ALIAS_REGISTER(Register, cp, x27);
-// We use a register as a JS stack pointer to overcome the restriction on the
-// architectural SP alignment.
-// We chose x28 because it is contiguous with the other specific purpose
-// registers.
-STATIC_ASSERT(kJSSPCode == 28);
-ALIAS_REGISTER(Register, jssp, x28);
-ALIAS_REGISTER(Register, wjssp, w28);
ALIAS_REGISTER(Register, fp, x29);
ALIAS_REGISTER(Register, lr, x30);
ALIAS_REGISTER(Register, xzr, x31);
@@ -1001,10 +996,6 @@ class Assembler : public AssemblerBase {
inline static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- static inline Address target_address_at(Address pc, Code* code);
- static inline void set_target_address_at(
- Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address of
// that call in the instruction stream.
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 1ad50e5112..52f92b6af9 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -30,7 +30,7 @@ namespace internal {
void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ Mov(x5, Operand(x0, LSL, kPointerSizeLog2));
- __ Str(x1, MemOperand(jssp, x5));
+ __ Str(x1, MemOperand(__ StackPointer(), x5));
__ Push(x1, x2);
__ Add(x0, x0, Operand(3));
__ TailCallRuntime(Runtime::kNewArray);
@@ -42,7 +42,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Register result = destination();
DCHECK(result.Is64Bits());
- DCHECK(jssp.Is(masm->StackPointer()));
UseScratchRegisterScope temps(masm);
Register scratch1 = temps.AcquireX();
@@ -75,7 +74,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
if (masm->emit_debug_code()) {
__ Cmp(exponent, HeapNumber::kExponentBias + 63);
// Exponents less than this should have been handled by the Fcvt case.
- __ Check(ge, kUnexpectedValue);
+ __ Check(ge, AbortReason::kUnexpectedValue);
}
// Isolate the mantissa bits, and set the implicit '1'.
@@ -100,8 +99,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
// Stack on entry:
- // jssp[0]: Exponent (as a tagged value).
- // jssp[1]: Base (as a tagged value).
+ // sp[0]: Exponent (as a tagged value).
+ // sp[1]: Base (as a tagged value).
//
// The (tagged) result will be returned in x0, as a heap number.
@@ -276,15 +275,14 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// The stack on entry holds the arguments and the receiver, with the receiver
// at the highest address:
//
- // jssp]argc-1]: receiver
- // jssp[argc-2]: arg[argc-2]
+ // sp[argc-1]: receiver
+ // sp[argc-2]: arg[argc-2]
// ... ...
- // jssp[1]: arg[1]
- // jssp[0]: arg[0]
+ // sp[1]: arg[1]
+ // sp[0]: arg[0]
//
// The arguments are in reverse order, so that arg[argc-2] is actually the
// first argument to the target function and arg[0] is the last.
- DCHECK(jssp.Is(__ StackPointer()));
const Register& argc_input = x0;
const Register& target_input = x1;
@@ -385,7 +383,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
__ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
__ Cmp(temp, x12);
- __ Check(eq, kReturnAddressNotFoundInFrame);
+ __ Check(eq, AbortReason::kReturnAddressNotFoundInFrame);
}
// Call the builtin.
@@ -415,8 +413,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Peek(argc, 2 * kPointerSize);
__ Peek(target, 3 * kPointerSize);
- __ LeaveExitFrame(save_doubles(), x10);
- DCHECK(jssp.Is(__ StackPointer()));
+ __ LeaveExitFrame(save_doubles(), x10, x9);
if (!argv_in_register()) {
// Drop the remaining stack slots and return from the stub.
__ DropArguments(x11);
@@ -424,10 +421,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ AssertFPCRState();
__ Ret();
- // The stack pointer is still csp if we aren't returning, and the frame
- // hasn't changed (except for the return address).
- __ SetStackPointer(csp);
-
// Handling of exception.
__ Bind(&exception_returned);
@@ -453,18 +446,16 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ CallCFunction(find_handler, 3);
}
- // We didn't execute a return case, so the stack frame hasn't been updated
- // (except for the return address slot). However, we don't need to initialize
- // jssp because the throw method will immediately overwrite it when it
- // unwinds the stack.
- __ SetStackPointer(jssp);
-
// Retrieve the handler context, SP and FP.
__ Mov(cp, Operand(pending_handler_context_address));
__ Ldr(cp, MemOperand(cp));
- __ Mov(jssp, Operand(pending_handler_sp_address));
- __ Ldr(jssp, MemOperand(jssp));
- __ Mov(csp, jssp);
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch, Operand(pending_handler_sp_address));
+ __ Ldr(scratch, MemOperand(scratch));
+ __ Mov(csp, scratch);
+ }
__ Mov(fp, Operand(pending_handler_fp_address));
__ Ldr(fp, MemOperand(fp));
@@ -481,9 +472,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Br(x10);
}
-
// This is the entry point from C++. 5 arguments are provided in x0-x4.
-// See use of the CALL_GENERATED_CODE macro for example in src/execution.cc.
+// See use of the JSEntryFunction for example in src/execution.cc.
// Input:
// x0: code entry.
// x1: function.
@@ -493,7 +483,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Output:
// x0: result.
void JSEntryStub::Generate(MacroAssembler* masm) {
- DCHECK(jssp.Is(__ StackPointer()));
Register code_entry = x0;
// Enable instruction instrumentation. This only works on the simulator, and
@@ -502,21 +491,16 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
Label invoke, handler_entry, exit;
- // Push callee-saved registers and synchronize the system stack pointer (csp)
- // and the JavaScript stack pointer (jssp).
- //
- // We must not write to jssp until after the PushCalleeSavedRegisters()
- // call, since jssp is itself a callee-saved register.
- __ SetStackPointer(csp);
__ PushCalleeSavedRegisters();
- __ Mov(jssp, csp);
- __ SetStackPointer(jssp);
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Set up the reserved register for 0.0.
__ Fmov(fp_zero, 0.0);
+ // Initialize the root array register.
+ __ InitializeRootRegister();
+
// Build an entry frame (see layout below).
StackFrame::Type marker = type();
int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
@@ -527,7 +511,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Push(x13, x12, xzr, x10);
// Set up fp.
- __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
+ __ Sub(fp, __ StackPointer(), EntryFrameConstants::kCallerFPOffset);
// Push the JS entry frame marker. Also set js_entry_sp if this is the
// outermost JS call.
@@ -546,14 +530,15 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Str(fp, MemOperand(x10));
__ Bind(&done);
- __ Push(x12);
+ __ Push(x12, padreg);
// The frame set up looks like this:
- // jssp[0] : JS entry frame marker.
- // jssp[1] : C entry FP.
- // jssp[2] : stack frame marker.
- // jssp[3] : stack frame marker.
- // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
+ // sp[0] : padding.
+ // sp[1] : JS entry frame marker.
+ // sp[2] : C entry FP.
+ // sp[3] : stack frame marker.
+ // sp[4] : stack frame marker.
+ // sp[5] : bad frame pointer 0xFFF...FF <- fp points here.
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
@@ -583,8 +568,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Bind(&invoke);
// Push new stack handler.
- DCHECK(jssp.Is(__ StackPointer()));
- static_assert(StackHandlerConstants::kSize == 1 * kPointerSize,
+ static_assert(StackHandlerConstants::kSize == 2 * kPointerSize,
"Unexpected offset for StackHandlerConstants::kSize");
static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize,
"Unexpected offset for StackHandlerConstants::kNextOffset");
@@ -592,10 +576,15 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Link the current handler as the next handler.
__ Mov(x11, ExternalReference(IsolateAddressId::kHandlerAddress, isolate()));
__ Ldr(x10, MemOperand(x11));
- __ Push(x10);
+ __ Push(padreg, x10);
// Set this new handler as the current one.
- __ Str(jssp, MemOperand(x11));
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch, __ StackPointer());
+ __ Str(scratch, MemOperand(x11));
+ }
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the B(&invoke) above, which
@@ -612,37 +601,32 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// x2: receiver.
// x3: argc.
// x4: argv.
-
- if (type() == StackFrame::CONSTRUCT_ENTRY) {
- __ Call(BUILTIN_CODE(isolate(), JSConstructEntryTrampoline),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(BUILTIN_CODE(isolate(), JSEntryTrampoline), RelocInfo::CODE_TARGET);
- }
+ __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
// Pop the stack handler and unlink this frame from the handler chain.
static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize,
"Unexpected offset for StackHandlerConstants::kNextOffset");
- __ Pop(x10);
+ __ Pop(x10, padreg);
__ Mov(x11, ExternalReference(IsolateAddressId::kHandlerAddress, isolate()));
- __ Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
+ __ Drop(StackHandlerConstants::kSlotCount - 2);
__ Str(x10, MemOperand(x11));
__ Bind(&exit);
// x0 holds the result.
// The stack pointer points to the top of the entry frame pushed on entry from
// C++ (at the beginning of this stub):
- // jssp[0] : JS entry frame marker.
- // jssp[1] : C entry FP.
- // jssp[2] : stack frame marker.
- // jssp[3] : stack frmae marker.
- // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
+ // sp[0] : padding.
+ // sp[1] : JS entry frame marker.
+ // sp[2] : C entry FP.
+ // sp[3] : stack frame marker.
+ // sp[4] : stack frame marker.
+ // sp[5] : bad frame pointer 0xFFF...FF <- fp points here.
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
{
Register c_entry_fp = x11;
- __ Pop(x10, c_entry_fp);
+ __ PeekPair(x10, c_entry_fp, 1 * kPointerSize);
__ Cmp(x10, StackFrame::OUTERMOST_JSENTRY_FRAME);
__ B(ne, &non_outermost_js_2);
__ Mov(x12, ExternalReference(js_entry_sp));
@@ -656,21 +640,17 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
}
// Reset the stack to the callee saved registers.
- __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
+ static_assert(EntryFrameConstants::kFixedFrameSize % (2 * kPointerSize) == 0,
+ "Size of entry frame is not a multiple of 16 bytes");
+ __ Drop(EntryFrameConstants::kFixedFrameSize / kPointerSize);
// Restore the callee-saved registers and return.
- DCHECK(jssp.Is(__ StackPointer()));
- __ Mov(csp, jssp);
- __ SetStackPointer(csp);
__ PopCalleeSavedRegisters();
- // After this point, we must not modify jssp because it is a callee-saved
- // register which we have just restored.
__ Ret();
}
-// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
-// a "Push lr" instruction, followed by a call.
+// The entry hook is a Push (stp) instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
- Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
+ (1 * kInstructionSize) + Assembler::kCallSizeWithRelocation;
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
@@ -748,14 +728,6 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::Generate(MacroAssembler* masm) {
- // When calling into C++ code the stack pointer must be csp.
- // Therefore this code must use csp for peek/poke operations when the
- // stub is generated. When the stub is called
- // (via DirectCEntryStub::GenerateCall), the caller must setup an ExitFrame
- // and configure the stack pointer *before* doing the call.
- const Register old_stack_pointer = __ StackPointer();
- __ SetStackPointer(csp);
-
// Put return address on the stack (accessible to GC through exit frame pc).
__ Poke(lr, 0);
// Call the C++ function.
@@ -764,8 +736,6 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
__ Peek(lr, 0);
__ AssertFPCRState();
__ Ret();
-
- __ SetStackPointer(old_stack_pointer);
}
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
@@ -806,7 +776,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
@@ -856,7 +826,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ Ldr(x10, FieldMemOperand(allocation_site, 0));
__ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
&normal_sequence);
- __ Assert(eq, kExpectedAllocationSite);
+ __ Assert(eq, AbortReason::kExpectedAllocationSite);
}
// Save the resulting elements kind in type info. We can't just store 'kind'
@@ -884,7 +854,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -972,7 +942,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(x10, &unexpected_map);
__ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
__ Bind(&unexpected_map);
- __ Abort(kUnexpectedInitialMapForArrayFunction);
+ __ Abort(AbortReason::kUnexpectedInitialMapForArrayFunction);
__ Bind(&map_ok);
// We should either have undefined in the allocation_site register or a
@@ -1069,7 +1039,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(x10, &unexpected_map);
__ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
__ Bind(&unexpected_map);
- __ Abort(kUnexpectedInitialMapForArrayFunction);
+ __ Abort(AbortReason::kUnexpectedInitialMapForArrayFunction);
__ Bind(&map_ok);
}
@@ -1085,7 +1055,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
Label done;
__ Cmp(x3, PACKED_ELEMENTS);
__ Ccmp(x3, HOLEY_ELEMENTS, ZFlag, ne);
- __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ Assert(
+ eq,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
}
Label fast_elements_case;
@@ -1202,7 +1174,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
if (__ emit_debug_code()) {
__ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
__ Cmp(w1, level_reg);
- __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
}
__ Sub(level_reg, level_reg, 1);
__ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
@@ -1218,7 +1190,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ Peek(x21, (spill_offset + 2) * kXRegSize);
__ Peek(x22, (spill_offset + 3) * kXRegSize);
- __ LeaveExitFrame(false, x1);
+ __ LeaveExitFrame(false, x1, x5);
// Check if the function scheduled an exception.
__ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
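A recurring pattern in this file is that every push or pop now moves the stack by an even number of slots (Push(x12, padreg), Pop(x10, padreg), PeekPair(...)), because csp is the only stack pointer left and the architecture requires it to be 16-byte aligned at memory accesses. The following is a purely illustrative host-side model of that invariant, not generated code.

#include <cassert>
#include <cstdint>

// Illustrative model: arm64 csp must stay 16-byte aligned, so the stubs pad
// single-register pushes with padreg to keep slot counts even.
struct StackModel {
  uint64_t sp = 0x10000;                 // hypothetical starting csp, aligned
  void Push(int slots) {
    assert(slots % 2 == 0);              // padreg makes odd pushes even
    sp -= static_cast<uint64_t>(slots) * 8;
    assert(sp % 16 == 0);
  }
};

int main() {
  StackModel s;
  s.Push(2);  // e.g. Push(x12, padreg): frame marker plus padding
  s.Push(2);  // e.g. Push(padreg, x10): stack handler next-link plus padding
  return 0;
}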
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index f945830045..b02dd5d2d7 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -101,7 +101,6 @@ const int kIp1Code = 17;
const int kFramePointerRegCode = 29;
const int kLinkRegCode = 30;
const int kZeroRegCode = 31;
-const int kJSSPCode = 28;
const int kSPRegInternalCode = 63;
const unsigned kRegCodeMask = 0x1f;
const unsigned kShiftAmountWRegMask = 0x1f;
diff --git a/deps/v8/src/arm64/cpu-arm64.cc b/deps/v8/src/arm64/cpu-arm64.cc
index d4cb200de6..26ec06e094 100644
--- a/deps/v8/src/arm64/cpu-arm64.cc
+++ b/deps/v8/src/arm64/cpu-arm64.cc
@@ -31,7 +31,7 @@ class CacheLineSizes {
uint32_t ExtractCacheLineSize(int cache_line_size_shift) const {
// The cache type register holds the size of cache lines in words as a
// power of two.
- return 4 << ((cache_type_register_ >> cache_line_size_shift) & 0xf);
+ return 4 << ((cache_type_register_ >> cache_line_size_shift) & 0xF);
}
uint32_t cache_type_register_;
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 5f372eadd2..8269e8e50a 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -108,11 +108,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ PushCPURegList(saved_float_registers);
// We save all the registers except sp, lr and the masm scratches.
- CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 27);
+ CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 28);
saved_registers.Remove(ip0);
saved_registers.Remove(ip1);
- // TODO(arm): padding here can be replaced with jssp/x28 when allocatable.
- saved_registers.Combine(padreg);
saved_registers.Combine(fp);
DCHECK_EQ(saved_registers.Count() % 2, 0);
__ PushCPURegList(saved_registers);
@@ -220,8 +218,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ Pop(x4, padreg); // Restore deoptimizer object (class Deoptimizer).
- __ Ldr(__ StackPointer(),
- MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
+ {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.AcquireX();
+ __ Ldr(scratch, MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
+ __ Mov(__ StackPointer(), scratch);
+ }
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
@@ -324,7 +326,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
if (__ emit_debug_code()) {
// Ensure the entry_id looks sensible, ie. 0 <= entry_id < count().
__ Cmp(entry_id, count());
- __ Check(lo, kOffsetOutOfRange);
+ __ Check(lo, AbortReason::kOffsetOutOfRange);
}
}
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
index c9b2c9a4aa..41c654b214 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -256,27 +256,26 @@ void DisassemblingDecoder::VisitLogicalImmediate(Instruction* instr) {
bool DisassemblingDecoder::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
DCHECK((reg_size == kXRegSizeInBits) ||
- ((reg_size == kWRegSizeInBits) && (value <= 0xffffffff)));
+ ((reg_size == kWRegSizeInBits) && (value <= 0xFFFFFFFF)));
// Test for movz: 16-bits set at positions 0, 16, 32 or 48.
- if (((value & 0xffffffffffff0000UL) == 0UL) ||
- ((value & 0xffffffff0000ffffUL) == 0UL) ||
- ((value & 0xffff0000ffffffffUL) == 0UL) ||
- ((value & 0x0000ffffffffffffUL) == 0UL)) {
+ if (((value & 0xFFFFFFFFFFFF0000UL) == 0UL) ||
+ ((value & 0xFFFFFFFF0000FFFFUL) == 0UL) ||
+ ((value & 0xFFFF0000FFFFFFFFUL) == 0UL) ||
+ ((value & 0x0000FFFFFFFFFFFFUL) == 0UL)) {
return true;
}
// Test for movn: NOT(16-bits set at positions 0, 16, 32 or 48).
if ((reg_size == kXRegSizeInBits) &&
- (((value & 0xffffffffffff0000UL) == 0xffffffffffff0000UL) ||
- ((value & 0xffffffff0000ffffUL) == 0xffffffff0000ffffUL) ||
- ((value & 0xffff0000ffffffffUL) == 0xffff0000ffffffffUL) ||
- ((value & 0x0000ffffffffffffUL) == 0x0000ffffffffffffUL))) {
+ (((value & 0xFFFFFFFFFFFF0000UL) == 0xFFFFFFFFFFFF0000UL) ||
+ ((value & 0xFFFFFFFF0000FFFFUL) == 0xFFFFFFFF0000FFFFUL) ||
+ ((value & 0xFFFF0000FFFFFFFFUL) == 0xFFFF0000FFFFFFFFUL) ||
+ ((value & 0x0000FFFFFFFFFFFFUL) == 0x0000FFFFFFFFFFFFUL))) {
return true;
}
- if ((reg_size == kWRegSizeInBits) &&
- (((value & 0xffff0000) == 0xffff0000) ||
- ((value & 0x0000ffff) == 0x0000ffff))) {
+ if ((reg_size == kWRegSizeInBits) && (((value & 0xFFFF0000) == 0xFFFF0000) ||
+ ((value & 0x0000FFFF) == 0x0000FFFF))) {
return true;
}
return false;
@@ -3332,8 +3331,6 @@ void DisassemblingDecoder::AppendRegisterNameToOutput(const CPURegister& reg) {
// Filter special registers
if (reg.IsX() && (reg.code() == 27)) {
AppendToOutput("cp");
- } else if (reg.IsX() && (reg.code() == 28)) {
- AppendToOutput("jssp");
} else if (reg.IsX() && (reg.code() == 29)) {
AppendToOutput("fp");
} else if (reg.IsX() && (reg.code() == 30)) {
@@ -3469,7 +3466,7 @@ int DisassemblingDecoder::SubstituteRegisterField(Instruction* instr,
case 'e':
// This is register Rm, but using a 4-bit specifier. Used in NEON
// by-element instructions.
- reg_num = (instr->Rm() & 0xf);
+ reg_num = (instr->Rm() & 0xF);
break;
case 'a':
reg_num = instr->Ra();
@@ -3545,8 +3542,6 @@ int DisassemblingDecoder::SubstituteRegisterField(Instruction* instr,
return field_len;
default:
UNREACHABLE();
- reg_type = CPURegister::kRegister;
- reg_size = kXRegSizeInBits;
}
if ((reg_type == CPURegister::kRegister) && (reg_num == kZeroRegCode) &&
@@ -3569,7 +3564,7 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
uint64_t imm = static_cast<uint64_t>(instr->ImmMoveWide())
<< (16 * instr->ShiftMoveWide());
if (format[5] == 'N') imm = ~imm;
- if (!instr->SixtyFourBits()) imm &= UINT64_C(0xffffffff);
+ if (!instr->SixtyFourBits()) imm &= UINT64_C(0xFFFFFFFF);
AppendToOutput("#0x%" PRIx64, imm);
} else {
DCHECK_EQ(format[5], 'L');
@@ -3696,7 +3691,7 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
vm_index = (vm_index << 1) | instr->NEONM();
}
AppendToOutput("%d", vm_index);
- return strlen("IVByElemIndex");
+ return static_cast<int>(strlen("IVByElemIndex"));
}
case 'I': { // INS element.
if (strncmp(format, "IVInsIndex", strlen("IVInsIndex")) == 0) {
@@ -3709,11 +3704,11 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
rn_index = imm4 >> tz;
if (strncmp(format, "IVInsIndex1", strlen("IVInsIndex1")) == 0) {
AppendToOutput("%d", rd_index);
- return strlen("IVInsIndex1");
+ return static_cast<int>(strlen("IVInsIndex1"));
} else if (strncmp(format, "IVInsIndex2",
strlen("IVInsIndex2")) == 0) {
AppendToOutput("%d", rn_index);
- return strlen("IVInsIndex2");
+ return static_cast<int>(strlen("IVInsIndex2"));
}
}
return 0;
@@ -3728,38 +3723,38 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
0) {
AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(),
instr->ImmNEONFP32());
- return strlen("IVMIImmFPSingle");
+ return static_cast<int>(strlen("IVMIImmFPSingle"));
} else if (strncmp(format, "IVMIImmFPDouble",
strlen("IVMIImmFPDouble")) == 0) {
AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(),
instr->ImmNEONFP64());
- return strlen("IVMIImmFPDouble");
+ return static_cast<int>(strlen("IVMIImmFPDouble"));
} else if (strncmp(format, "IVMIImm8", strlen("IVMIImm8")) == 0) {
uint64_t imm8 = instr->ImmNEONabcdefgh();
AppendToOutput("#0x%" PRIx64, imm8);
- return strlen("IVMIImm8");
+ return static_cast<int>(strlen("IVMIImm8"));
} else if (strncmp(format, "IVMIImm", strlen("IVMIImm")) == 0) {
uint64_t imm8 = instr->ImmNEONabcdefgh();
uint64_t imm = 0;
for (int i = 0; i < 8; ++i) {
if (imm8 & (1 << i)) {
- imm |= (UINT64_C(0xff) << (8 * i));
+ imm |= (UINT64_C(0xFF) << (8 * i));
}
}
AppendToOutput("#0x%" PRIx64, imm);
- return strlen("IVMIImm");
+ return static_cast<int>(strlen("IVMIImm"));
} else if (strncmp(format, "IVMIShiftAmt1",
strlen("IVMIShiftAmt1")) == 0) {
int cmode = instr->NEONCmode();
int shift_amount = 8 * ((cmode >> 1) & 3);
AppendToOutput("#%d", shift_amount);
- return strlen("IVMIShiftAmt1");
+ return static_cast<int>(strlen("IVMIShiftAmt1"));
} else if (strncmp(format, "IVMIShiftAmt2",
strlen("IVMIShiftAmt2")) == 0) {
int cmode = instr->NEONCmode();
int shift_amount = 8 << (cmode & 1);
AppendToOutput("#%d", shift_amount);
- return strlen("IVMIShiftAmt2");
+ return static_cast<int>(strlen("IVMIShiftAmt2"));
} else {
UNIMPLEMENTED();
return 0;
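The IVMIImm expansion above and the movi hunk in assembler-arm64.cc earlier are the two directions of the same encoding: an 8-bit NEON "byte mask" immediate where bit i selects whether byte i of the 64-bit value is 0xFF. A small round-trip sketch, standalone and not V8 code, with made-up helper names:

#include <cassert>
#include <cstdint>

// Encode a 64-bit value whose bytes are all 0x00 or 0xFF into the 8-bit
// byte-mask immediate (bit i set <=> byte i is 0xFF), and decode it back.
uint8_t EncodeByteMask(uint64_t value) {
  uint8_t imm8 = 0;
  for (int i = 0; i < 8; ++i) {
    uint64_t byte = (value >> (i * 8)) & 0xFF;
    assert(byte == 0 || byte == 0xFF);  // precondition, as in the assembler
    if (byte == 0xFF) imm8 |= static_cast<uint8_t>(1u << i);
  }
  return imm8;
}

uint64_t DecodeByteMask(uint8_t imm8) {
  uint64_t value = 0;
  for (int i = 0; i < 8; ++i) {
    if (imm8 & (1u << i)) value |= UINT64_C(0xFF) << (8 * i);
  }
  return value;
}

int main() {
  uint64_t v = 0x00FF00FFFF0000FF;
  assert(DecodeByteMask(EncodeByteMask(v)) == v);
  return 0;
}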
diff --git a/deps/v8/src/arm64/eh-frame-arm64.cc b/deps/v8/src/arm64/eh-frame-arm64.cc
index 507cbd1c2b..48909d5b2d 100644
--- a/deps/v8/src/arm64/eh-frame-arm64.cc
+++ b/deps/v8/src/arm64/eh-frame-arm64.cc
@@ -9,7 +9,6 @@ namespace v8 {
namespace internal {
static const int kX0DwarfCode = 0;
-static const int kJsSpDwarfCode = 28;
static const int kFpDwarfCode = 29;
static const int kLrDwarfCode = 30;
static const int kCSpDwarfCode = 31;
@@ -29,13 +28,11 @@ void EhFrameWriter::WriteInitialStateInCie() {
// static
int EhFrameWriter::RegisterToDwarfCode(Register name) {
switch (name.code()) {
- case kRegCode_x28:
- return kJsSpDwarfCode;
case kRegCode_x29:
return kFpDwarfCode;
case kRegCode_x30:
return kLrDwarfCode;
- case kRegCode_x31:
+ case kSPRegInternalCode:
return kCSpDwarfCode;
case kRegCode_x0:
return kX0DwarfCode;
@@ -54,8 +51,6 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
return "fp";
case kLrDwarfCode:
return "lr";
- case kJsSpDwarfCode:
- return "jssp";
case kCSpDwarfCode:
return "csp"; // This could be zr as well
default:
diff --git a/deps/v8/src/arm64/frame-constants-arm64.h b/deps/v8/src/arm64/frame-constants-arm64.h
index 882a57a851..a337079786 100644
--- a/deps/v8/src/arm64/frame-constants-arm64.h
+++ b/deps/v8/src/arm64/frame-constants-arm64.h
@@ -8,10 +8,31 @@
namespace v8 {
namespace internal {
+// The layout of an EntryFrame is as follows:
+//
+// slot Entry frame
+// +---------------------+-----------------------
+// 0 | bad frame pointer | <-- frame ptr
+// | (0xFFF.. FF) |
+// |- - - - - - - - - - -|
+// 1 | stack frame marker |
+// | (ENTRY) |
+// |- - - - - - - - - - -|
+// 2 | stack frame marker |
+// | (0) |
+// |- - - - - - - - - - -|
+// 3 | C entry FP |
+// |- - - - - - - - - - -|
+// 4 | JS entry frame |
+// | marker |
+// |- - - - - - - - - - -|
+// 5 | padding | <-- stack ptr
+// -----+---------------------+-----------------------
+//
class EntryFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset =
- -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+ static const int kCallerFPOffset = -3 * kPointerSize;
+ static const int kFixedFrameSize = 6 * kPointerSize;
};
class ExitFrameConstants : public TypedFrameConstants {
diff --git a/deps/v8/src/arm64/instructions-arm64-constants.cc b/deps/v8/src/arm64/instructions-arm64-constants.cc
index 5f1b49fbdc..0a15287417 100644
--- a/deps/v8/src/arm64/instructions-arm64-constants.cc
+++ b/deps/v8/src/arm64/instructions-arm64-constants.cc
@@ -21,26 +21,26 @@ namespace internal {
// then move this code back into instructions-arm64.cc with the same types
// that client code uses.
-extern const uint16_t kFP16PositiveInfinity = 0x7c00;
-extern const uint16_t kFP16NegativeInfinity = 0xfc00;
-extern const uint32_t kFP32PositiveInfinity = 0x7f800000;
-extern const uint32_t kFP32NegativeInfinity = 0xff800000;
-extern const uint64_t kFP64PositiveInfinity = 0x7ff0000000000000UL;
-extern const uint64_t kFP64NegativeInfinity = 0xfff0000000000000UL;
+extern const uint16_t kFP16PositiveInfinity = 0x7C00;
+extern const uint16_t kFP16NegativeInfinity = 0xFC00;
+extern const uint32_t kFP32PositiveInfinity = 0x7F800000;
+extern const uint32_t kFP32NegativeInfinity = 0xFF800000;
+extern const uint64_t kFP64PositiveInfinity = 0x7FF0000000000000UL;
+extern const uint64_t kFP64NegativeInfinity = 0xFFF0000000000000UL;
// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
-extern const uint64_t kFP64SignallingNaN = 0x7ff000007f800001;
-extern const uint32_t kFP32SignallingNaN = 0x7f800001;
+extern const uint64_t kFP64SignallingNaN = 0x7FF000007F800001;
+extern const uint32_t kFP32SignallingNaN = 0x7F800001;
// A similar value, but as a quiet NaN.
-extern const uint64_t kFP64QuietNaN = 0x7ff800007fc00001;
-extern const uint32_t kFP32QuietNaN = 0x7fc00001;
+extern const uint64_t kFP64QuietNaN = 0x7FF800007FC00001;
+extern const uint32_t kFP32QuietNaN = 0x7FC00001;
// The default NaN values (for FPCR.DN=1).
-extern const uint64_t kFP64DefaultNaN = 0x7ff8000000000000UL;
-extern const uint32_t kFP32DefaultNaN = 0x7fc00000;
-extern const uint16_t kFP16DefaultNaN = 0x7e00;
+extern const uint64_t kFP64DefaultNaN = 0x7FF8000000000000UL;
+extern const uint32_t kFP32DefaultNaN = 0x7FC00000;
+extern const uint16_t kFP16DefaultNaN = 0x7E00;
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/arm64/instrument-arm64.cc
index 9fc2adb6f7..8e9cce7197 100644
--- a/deps/v8/src/arm64/instrument-arm64.cc
+++ b/deps/v8/src/arm64/instrument-arm64.cc
@@ -189,8 +189,8 @@ void Instrument::DumpEventMarker(unsigned marker) {
// line.
static Counter* counter = GetCounter("Instruction");
- fprintf(output_stream_, "# %c%c @ %" PRId64 "\n", marker & 0xff,
- (marker >> 8) & 0xff, counter->count());
+ fprintf(output_stream_, "# %c%c @ %" PRId64 "\n", marker & 0xFF,
+ (marker >> 8) & 0xFF, counter->count());
}
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 300d42d565..17b058bd01 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -45,8 +45,6 @@ const Register LoadDescriptor::SlotRegister() { return x0; }
const Register LoadWithVectorDescriptor::VectorRegister() { return x3; }
-const Register LoadICProtoArrayDescriptor::HandlerRegister() { return x4; }
-
const Register StoreDescriptor::ReceiverRegister() { return x1; }
const Register StoreDescriptor::NameRegister() { return x2; }
const Register StoreDescriptor::ValueRegister() { return x0; }
@@ -209,6 +207,11 @@ void TransitionElementsKindDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void AbortJSDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {x1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index 9bef2b378b..0861551d89 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -1048,7 +1048,6 @@ void MacroAssembler::AlignAndSetCSPForFrame() {
DCHECK_GE(sp_alignment, 16);
DCHECK(base::bits::IsPowerOfTwo(sp_alignment));
Bic(csp, StackPointer(), sp_alignment - 1);
- SetStackPointer(csp);
}
void TurboAssembler::BumpSystemStackPointer(const Operand& space) {
@@ -1140,22 +1139,6 @@ void MacroAssembler::SmiUntagToFloat(VRegister dst, Register src) {
Scvtf(dst, src, kSmiShift);
}
-
-void MacroAssembler::SmiTagAndPush(Register src) {
- STATIC_ASSERT((static_cast<unsigned>(kSmiShift) == kWRegSizeInBits) &&
- (static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits) &&
- (kSmiTag == 0));
- Push(src.W(), wzr);
-}
-
-
-void MacroAssembler::SmiTagAndPush(Register src1, Register src2) {
- STATIC_ASSERT((static_cast<unsigned>(kSmiShift) == kWRegSizeInBits) &&
- (static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits) &&
- (kSmiTag == 0));
- Push(src1.W(), wzr, src2.W(), wzr);
-}
-
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
Label* not_smi_label) {
STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
@@ -1222,7 +1205,7 @@ void MacroAssembler::ObjectTag(Register tagged_obj, Register obj) {
if (emit_debug_code()) {
Label ok;
Tbz(obj, 0, &ok);
- Abort(kObjectTagged);
+ Abort(AbortReason::kObjectTagged);
Bind(&ok);
}
Orr(tagged_obj, obj, kHeapObjectTag);
@@ -1234,7 +1217,7 @@ void MacroAssembler::ObjectUntag(Register untagged_obj, Register obj) {
if (emit_debug_code()) {
Label ok;
Tbnz(obj, 0, &ok);
- Abort(kObjectNotTagged);
+ Abort(AbortReason::kObjectNotTagged);
Bind(&ok);
}
Bic(untagged_obj, obj, kHeapObjectTag);
@@ -1246,7 +1229,10 @@ void TurboAssembler::Push(Handle<HeapObject> handle) {
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireX();
Mov(tmp, Operand(handle));
- Push(tmp);
+ // This is only used in test-heap.cc, for generating code that is not
+ // executed. Push a padding slot together with the handle here, to
+ // satisfy the alignment requirement.
+ Push(padreg, tmp);
}
void TurboAssembler::Push(Smi* smi) {
@@ -1355,21 +1341,31 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
void TurboAssembler::DropArguments(const Register& count,
ArgumentsCountMode mode) {
+ int extra_slots = 1; // Padding slot.
if (mode == kCountExcludesReceiver) {
- UseScratchRegisterScope temps(this);
- Register tmp = temps.AcquireX();
- Add(tmp, count, 1);
- Drop(tmp);
- } else {
- Drop(count);
+ // Add a slot for the receiver.
+ ++extra_slots;
+ }
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Add(tmp, count, extra_slots);
+ Bic(tmp, tmp, 1);
+ Drop(tmp, kXRegSize);
+}
+
+void TurboAssembler::DropArguments(int64_t count, ArgumentsCountMode mode) {
+ if (mode == kCountExcludesReceiver) {
+ // Add a slot for the receiver.
+ ++count;
}
+ Drop(RoundUp(count, 2), kXRegSize);
}
-void TurboAssembler::DropSlots(int64_t count, uint64_t unit_size) {
- Drop(count, unit_size);
+void TurboAssembler::DropSlots(int64_t count) {
+ Drop(RoundUp(count, 2), kXRegSize);
}
-void TurboAssembler::PushArgument(const Register& arg) { Push(arg); }
+void TurboAssembler::PushArgument(const Register& arg) { Push(padreg, arg); }
void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo(unit_size));
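The new DropArguments arithmetic above is easy to sanity-check in isolation: with a padding slot keeping the total pushed an even number of 8-byte slots, dropping n arguments plus the receiver must drop RoundUp(n + 1, 2) slots, which is what Add(tmp, count, 2) followed by Bic(tmp, tmp, 1) computes in the kCountExcludesReceiver case. A hedged host-side sketch of that calculation; SlotsToDrop is a made-up name for illustration only.

#include <cassert>
#include <cstdint>

// Mirrors the slot count used by the arm64 DropArguments above in the
// kCountExcludesReceiver mode: one extra slot for the receiver, rounded up to
// an even number of slots so the 16-byte stack alignment is preserved.
int64_t SlotsToDrop(int64_t arg_count) {
  int64_t slots = arg_count + 2;   // + receiver + provisional padding slot
  return slots & ~int64_t{1};      // Bic(tmp, tmp, 1): round down to even
}

int main() {
  assert(SlotsToDrop(3) == 4);  // 3 args + receiver fill 4 slots, no padding
  assert(SlotsToDrop(4) == 6);  // 4 args + receiver need a padding slot
  return 0;
}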
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 5f69f0e1e2..3869046f74 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -44,7 +44,6 @@ TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
#endif
tmp_list_(DefaultTmpList()),
fptmp_list_(DefaultFPTmpList()),
- sp_(jssp),
use_real_aborts_(true) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
@@ -160,7 +159,7 @@ void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
UNREACHABLE();
}
} else if ((rd.Is64Bits() && (immediate == -1L)) ||
- (rd.Is32Bits() && (immediate == 0xffffffffL))) {
+ (rd.Is32Bits() && (immediate == 0xFFFFFFFFL))) {
switch (op) {
case AND:
Mov(rd, rn);
@@ -252,15 +251,15 @@ void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
// Generic immediate case. Imm will be represented by
// [imm3, imm2, imm1, imm0], where each imm is 16 bits.
// A move-zero or move-inverted is generated for the first non-zero or
- // non-0xffff immX, and a move-keep for subsequent non-zero immX.
+ // non-0xFFFF immX, and a move-keep for subsequent non-zero immX.
uint64_t ignored_halfword = 0;
bool invert_move = false;
- // If the number of 0xffff halfwords is greater than the number of 0x0000
+ // If the number of 0xFFFF halfwords is greater than the number of 0x0000
// halfwords, it's more efficient to use move-inverted.
if (CountClearHalfWords(~imm, reg_size) >
CountClearHalfWords(imm, reg_size)) {
- ignored_halfword = 0xffffL;
+ ignored_halfword = 0xFFFFL;
invert_move = true;
}
@@ -274,11 +273,11 @@ void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
DCHECK_EQ(reg_size % 16, 0);
bool first_mov_done = false;
for (int i = 0; i < (rd.SizeInBits() / 16); i++) {
- uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
+ uint64_t imm16 = (imm >> (16 * i)) & 0xFFFFL;
if (imm16 != ignored_halfword) {
if (!first_mov_done) {
if (invert_move) {
- movn(temp, (~imm16) & 0xffffL, 16 * i);
+ movn(temp, (~imm16) & 0xFFFFL, 16 * i);
} else {
movz(temp, imm16, 16 * i);
}
@@ -356,18 +355,18 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
DCHECK(is_uint16(imm));
- int byte1 = (imm & 0xff);
- int byte2 = ((imm >> 8) & 0xff);
+ int byte1 = (imm & 0xFF);
+ int byte2 = ((imm >> 8) & 0xFF);
if (byte1 == byte2) {
movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1);
} else if (byte1 == 0) {
movi(vd, byte2, LSL, 8);
} else if (byte2 == 0) {
movi(vd, byte1);
- } else if (byte1 == 0xff) {
- mvni(vd, ~byte2 & 0xff, LSL, 8);
- } else if (byte2 == 0xff) {
- mvni(vd, ~byte1 & 0xff);
+ } else if (byte1 == 0xFF) {
+ mvni(vd, ~byte2 & 0xFF, LSL, 8);
+ } else if (byte2 == 0xFF) {
+ mvni(vd, ~byte1 & 0xFF);
} else {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireW();
@@ -382,11 +381,11 @@ void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
uint8_t bytes[sizeof(imm)];
memcpy(bytes, &imm, sizeof(imm));
- // All bytes are either 0x00 or 0xff.
+ // All bytes are either 0x00 or 0xFF.
{
bool all0orff = true;
for (int i = 0; i < 4; ++i) {
- if ((bytes[i] != 0) && (bytes[i] != 0xff)) {
+ if ((bytes[i] != 0) && (bytes[i] != 0xFF)) {
all0orff = false;
break;
}
@@ -400,47 +399,47 @@ void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
// Of the 4 bytes, only one byte is non-zero.
for (int i = 0; i < 4; i++) {
- if ((imm & (0xff << (i * 8))) == imm) {
+ if ((imm & (0xFF << (i * 8))) == imm) {
movi(vd, bytes[i], LSL, i * 8);
return;
}
}
- // Of the 4 bytes, only one byte is not 0xff.
+ // Of the 4 bytes, only one byte is not 0xFF.
for (int i = 0; i < 4; i++) {
- uint32_t mask = ~(0xff << (i * 8));
+ uint32_t mask = ~(0xFF << (i * 8));
if ((imm & mask) == mask) {
- mvni(vd, ~bytes[i] & 0xff, LSL, i * 8);
+ mvni(vd, ~bytes[i] & 0xFF, LSL, i * 8);
return;
}
}
// Immediate is of the form 0x00MMFFFF.
- if ((imm & 0xff00ffff) == 0x0000ffff) {
+ if ((imm & 0xFF00FFFF) == 0x0000FFFF) {
movi(vd, bytes[2], MSL, 16);
return;
}
// Immediate is of the form 0x0000MMFF.
- if ((imm & 0xffff00ff) == 0x000000ff) {
+ if ((imm & 0xFFFF00FF) == 0x000000FF) {
movi(vd, bytes[1], MSL, 8);
return;
}
// Immediate is of the form 0xFFMM0000.
- if ((imm & 0xff00ffff) == 0xff000000) {
- mvni(vd, ~bytes[2] & 0xff, MSL, 16);
+ if ((imm & 0xFF00FFFF) == 0xFF000000) {
+ mvni(vd, ~bytes[2] & 0xFF, MSL, 16);
return;
}
// Immediate is of the form 0xFFFFMM00.
- if ((imm & 0xffff00ff) == 0xffff0000) {
- mvni(vd, ~bytes[1] & 0xff, MSL, 8);
+ if ((imm & 0xFFFF00FF) == 0xFFFF0000) {
+ mvni(vd, ~bytes[1] & 0xFF, MSL, 8);
return;
}
// Top and bottom 16-bits are equal.
- if (((imm >> 16) & 0xffff) == (imm & 0xffff)) {
- Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xffff);
+ if (((imm >> 16) & 0xFFFF) == (imm & 0xFFFF)) {
+ Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xFFFF);
return;
}
@@ -454,12 +453,12 @@ void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
}
void TurboAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
- // All bytes are either 0x00 or 0xff.
+ // All bytes are either 0x00 or 0xFF.
{
bool all0orff = true;
for (int i = 0; i < 8; ++i) {
- int byteval = (imm >> (i * 8)) & 0xff;
- if (byteval != 0 && byteval != 0xff) {
+ int byteval = (imm >> (i * 8)) & 0xFF;
+ if (byteval != 0 && byteval != 0xFF) {
all0orff = false;
break;
}
@@ -471,8 +470,8 @@ void TurboAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
}
// Top and bottom 32-bits are equal.
- if (((imm >> 32) & 0xffffffff) == (imm & 0xffffffff)) {
- Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xffffffff);
+ if (((imm >> 32) & 0xFFFFFFFF) == (imm & 0xFFFFFFFF)) {
+ Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xFFFFFFFF);
return;
}
@@ -547,7 +546,7 @@ unsigned TurboAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
DCHECK_EQ(reg_size % 8, 0);
int count = 0;
for (unsigned i = 0; i < (reg_size / 16); i++) {
- if ((imm & 0xffff) == 0) {
+ if ((imm & 0xFFFF) == 0) {
count++;
}
imm >>= 16;
@@ -563,9 +562,8 @@ bool TurboAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}
-
// The movn instruction can generate immediates containing an arbitrary 16-bit
-// half-word, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff.
+// half-word, with remaining bits set, eg. 0xFFFF1234, 0xFFFF1234FFFFFFFF.
bool TurboAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
return IsImmMovz(~imm, reg_size);
}
@@ -1375,7 +1373,7 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
DCHECK_GE(offset.ImmediateValue(), 0);
} else if (emit_debug_code()) {
Cmp(xzr, offset);
- Check(le, kStackAccessBelowStackPointer);
+ Check(le, AbortReason::kStackAccessBelowStackPointer);
}
Str(src, MemOperand(StackPointer(), offset));
@@ -1387,7 +1385,7 @@ void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
DCHECK_GE(offset.ImmediateValue(), 0);
} else if (emit_debug_code()) {
Cmp(xzr, offset);
- Check(le, kStackAccessBelowStackPointer);
+ Check(le, AbortReason::kStackAccessBelowStackPointer);
}
Ldr(dst, MemOperand(StackPointer(), offset));
@@ -1426,7 +1424,7 @@ void MacroAssembler::PushCalleeSavedRegisters() {
stp(d8, d9, tos);
stp(x29, x30, tos);
- stp(x27, x28, tos); // x28 = jssp
+ stp(x27, x28, tos);
stp(x25, x26, tos);
stp(x23, x24, tos);
stp(x21, x22, tos);
@@ -1448,7 +1446,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
ldp(x21, x22, tos);
ldp(x23, x24, tos);
ldp(x25, x26, tos);
- ldp(x27, x28, tos); // x28 = jssp
+ ldp(x27, x28, tos);
ldp(x29, x30, tos);
ldp(d8, d9, tos);
@@ -1479,7 +1477,7 @@ void TurboAssembler::AssertStackConsistency() {
{ DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
// Restore StackPointer().
sub(StackPointer(), csp, StackPointer());
- Abort(kTheCurrentStackPointerIsBelowCsp);
+ Abort(AbortReason::kTheCurrentStackPointerIsBelowCsp);
}
bind(&ok);
@@ -1531,7 +1529,7 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
Subs(pointer1, pointer1, pointer2);
B(lt, &pointer1_below_pointer2);
Cmp(pointer1, count);
- Check(ge, kOffsetOutOfRange);
+ Check(ge, AbortReason::kOffsetOutOfRange);
Bind(&pointer1_below_pointer2);
Add(pointer1, pointer1, pointer2);
}
@@ -1595,7 +1593,7 @@ void TurboAssembler::AssertFPCRState(Register fpcr) {
B(eq, &done);
Bind(&unexpected_mode);
- Abort(kUnexpectedFPCRMode);
+ Abort(AbortReason::kUnexpectedFPCRMode);
Bind(&done);
}
@@ -1632,7 +1630,7 @@ void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> x) { Mov(dst, x); }
void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
-void TurboAssembler::AssertSmi(Register object, BailoutReason reason) {
+void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
Tst(object, kSmiTagMask);
@@ -1640,7 +1638,7 @@ void TurboAssembler::AssertSmi(Register object, BailoutReason reason) {
}
}
-void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
+void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
Tst(object, kSmiTagMask);
@@ -1650,44 +1648,44 @@ void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
- AssertNotSmi(object, kOperandIsASmiAndNotAFixedArray);
+ AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFixedArray);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
CompareObjectType(object, temp, temp, FIXED_ARRAY_TYPE);
- Check(eq, kOperandIsNotAFixedArray);
+ Check(eq, AbortReason::kOperandIsNotAFixedArray);
}
}
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
- AssertNotSmi(object, kOperandIsASmiAndNotAFunction);
+ AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFunction);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
CompareObjectType(object, temp, temp, JS_FUNCTION_TYPE);
- Check(eq, kOperandIsNotAFunction);
+ Check(eq, AbortReason::kOperandIsNotAFunction);
}
}
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
- AssertNotSmi(object, kOperandIsASmiAndNotABoundFunction);
+ AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotABoundFunction);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
CompareObjectType(object, temp, temp, JS_BOUND_FUNCTION_TYPE);
- Check(eq, kOperandIsNotABoundFunction);
+ Check(eq, AbortReason::kOperandIsNotABoundFunction);
}
}
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
- AssertNotSmi(object, kOperandIsASmiAndNotAGeneratorObject);
+ AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
// Load map
UseScratchRegisterScope temps(this);
@@ -1704,7 +1702,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
bind(&do_check);
// Restore generator object to register and perform assertion
- Check(eq, kOperandIsNotAGeneratorObject);
+ Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
@@ -1716,7 +1714,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
- Assert(eq, kExpectedUndefinedOrCell);
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell);
Bind(&done_checking);
}
}
@@ -1726,7 +1724,7 @@ void TurboAssembler::AssertPositiveOrZero(Register value) {
Label done;
int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
Tbz(value, sign_bit, &done);
- Abort(kUnexpectedNegativeValue);
+ Abort(AbortReason::kUnexpectedNegativeValue);
Bind(&done);
}
}
@@ -1855,72 +1853,14 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
DCHECK_LE(num_of_double_args + num_of_reg_args, 2);
}
- // We rely on the frame alignment being 16 bytes, which means we never need
- // to align the CSP by an unknown number of bytes and we always know the delta
- // between the stack pointer and the frame pointer.
- DCHECK_EQ(ActivationFrameAlignment(), 16);
-
- // If the stack pointer is not csp, we need to derive an aligned csp from the
- // current stack pointer.
- const Register old_stack_pointer = StackPointer();
- if (!csp.Is(old_stack_pointer)) {
- AssertStackConsistency();
-
- int sp_alignment = ActivationFrameAlignment();
- // The current stack pointer is a callee saved register, and is preserved
- // across the call.
- DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
-
- // If more than eight arguments are passed to the function, we expect the
- // ninth argument onwards to have been placed on the csp-based stack
- // already. We assume csp already points to the last stack-passed argument
- // in that case.
- // Otherwise, align and synchronize the system stack pointer with jssp.
- if (num_of_reg_args <= kRegisterPassedArguments) {
- Bic(csp, old_stack_pointer, sp_alignment - 1);
- }
- SetStackPointer(csp);
- }
-
// Call directly. The function called cannot cause a GC, or allow preemption,
// so the return address in the link register stays correct.
Call(function);
- if (csp.Is(old_stack_pointer)) {
- if (num_of_reg_args > kRegisterPassedArguments) {
- // Drop the register passed arguments.
- int claim_slots = RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
- Drop(claim_slots);
- }
- } else {
- DCHECK(jssp.Is(old_stack_pointer));
- if (emit_debug_code()) {
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
-
- if (num_of_reg_args > kRegisterPassedArguments) {
- // We don't need to drop stack arguments, as the stack pointer will be
- // jssp when returning from this function. However, in debug builds, we
- // can check that jssp is as expected.
- int claim_slots =
- RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
-
- // Check jssp matches the previous value on the stack.
- Ldr(temp, MemOperand(csp, claim_slots * kPointerSize));
- Cmp(jssp, temp);
- Check(eq, kTheStackWasCorruptedByMacroAssemblerCall);
- } else {
- // Because the stack pointer must be aligned on a 16-byte boundary, the
- // aligned csp can be up to 12 bytes below the jssp. This is the case
- // where we only pushed one W register on top of an aligned jssp.
- Sub(temp, csp, old_stack_pointer);
- // We want temp <= 0 && temp >= -12.
- Cmp(temp, 0);
- Ccmp(temp, -12, NFlag, le);
- Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
- }
- }
- SetStackPointer(old_stack_pointer);
+ if (num_of_reg_args > kRegisterPassedArguments) {
+ // Drop the register passed arguments.
+ int claim_slots = RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
+ Drop(claim_slots);
}
}
@@ -1997,10 +1937,10 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
// Addresses are 48 bits so we never need to load the upper 16 bits.
uint64_t imm = reinterpret_cast<uint64_t>(target);
// If we don't use ARM tagged addresses, the 16 higher bits must be 0.
- DCHECK_EQ((imm >> 48) & 0xffff, 0);
- movz(temp, (imm >> 0) & 0xffff, 0);
- movk(temp, (imm >> 16) & 0xffff, 16);
- movk(temp, (imm >> 32) & 0xffff, 32);
+ DCHECK_EQ((imm >> 48) & 0xFFFF, 0);
+ movz(temp, (imm >> 0) & 0xFFFF, 0);
+ movk(temp, (imm >> 16) & 0xFFFF, 16);
+ movk(temp, (imm >> 32) & 0xFFFF, 32);
} else {
Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
}
@@ -2160,23 +2100,32 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
// after we drop current frame. We add kPointerSize to count the receiver
// argument which is not included into formal parameters count.
Register dst_reg = scratch0;
- add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
- add(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+ Add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
+ Add(dst_reg, dst_reg, StandardFrameConstants::kCallerSPOffset + kPointerSize);
+ // Round dst_reg up to a multiple of 16 bytes, so that we overwrite any
+ // potential padding.
+ Add(dst_reg, dst_reg, 15);
+ Bic(dst_reg, dst_reg, 15);
Register src_reg = caller_args_count_reg;
// Calculate the end of source area. +kPointerSize is for the receiver.
if (callee_args_count.is_reg()) {
- add(src_reg, jssp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
- add(src_reg, src_reg, Operand(kPointerSize));
+ Add(src_reg, StackPointer(),
+ Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
+ Add(src_reg, src_reg, kPointerSize);
} else {
- add(src_reg, jssp,
- Operand((callee_args_count.immediate() + 1) * kPointerSize));
+ Add(src_reg, StackPointer(),
+ (callee_args_count.immediate() + 1) * kPointerSize);
}
+ // Round src_reg up to a multiple of 16 bytes, so we include any potential
+ // padding in the copy.
+ Add(src_reg, src_reg, 15);
+ Bic(src_reg, src_reg, 15);
+
if (FLAG_debug_code) {
Cmp(src_reg, dst_reg);
- Check(lo, kStackAccessBelowStackPointer);
+ Check(lo, AbortReason::kStackAccessBelowStackPointer);
}
// Restore caller's frame pointer and return address now as they will be
@@ -2196,12 +2145,11 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
bind(&entry);
- Cmp(jssp, src_reg);
+ Cmp(StackPointer(), src_reg);
B(ne, &loop);
// Leave current frame.
- Mov(jssp, dst_reg);
- SetStackPointer(jssp);
+ Mov(StackPointer(), dst_reg);
AssertStackConsistency();
}
@@ -2412,12 +2360,12 @@ void TurboAssembler::TryConvertDoubleToInt64(Register result,
// the modulo operation on an integer register so we convert to a 64-bit
// integer.
//
- // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
+ // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7FF...FF)
// when the double is out of range. NaNs and infinities will be converted to 0
// (as ECMA-262 requires).
Fcvtzs(result.X(), double_input);
- // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
+ // The values INT64_MIN (0x800...00) or INT64_MAX (0x7FF...FF) are not
// representable using a double, so if the result is one of those then we know
// that saturation occurred, and we need to manually handle the conversion.
//
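
The saturation check described above relies on INT64_MAX not being exactly representable as a double. A small illustration (not V8 code) of why: a double mantissa carries 53 bits, so the nearest double to INT64_MAX is 2^63.

    #include <cstdint>
    #include <cstdio>
    #include <limits>

    int main() {
      double nearest = static_cast<double>(std::numeric_limits<int64_t>::max());
      std::printf("%.1f\n", nearest);  // typically prints 9223372036854775808.0, i.e. 2^63
      return 0;
    }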
@@ -2437,17 +2385,6 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
// contain our truncated int32 result.
TryConvertDoubleToInt64(result, double_input, &done);
- const Register old_stack_pointer = StackPointer();
- if (csp.Is(old_stack_pointer)) {
- // This currently only happens during compiler-unittest. If it arises
- // during regular code generation the DoubleToI stub should be updated to
- // cope with csp and have an extra parameter indicating which stack pointer
- // it should use.
- Push(jssp, xzr); // Push xzr to maintain csp required 16-bytes alignment.
- Mov(jssp, csp);
- SetStackPointer(jssp);
- }
-
// If we fell through then inline version didn't succeed - call stub instead.
Push(lr, double_input);
@@ -2458,13 +2395,6 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
Pop(xzr, lr); // xzr to drop the double input on the stack.
- if (csp.Is(old_stack_pointer)) {
- Mov(csp, jssp);
- SetStackPointer(csp);
- AssertStackConsistency();
- Pop(xzr, jssp);
- }
-
Bind(&done);
// Keep our invariant that the upper 32 bits are zero.
Uxtw(result.W(), result.W());
@@ -2472,7 +2402,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
void TurboAssembler::Prologue() {
Push(lr, fp, cp, x1);
- Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+ Add(fp, StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
}
void TurboAssembler::EnterFrame(StackFrame::Type type) {
@@ -2481,15 +2411,14 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
Register code_reg = temps.AcquireX();
if (type == StackFrame::INTERNAL) {
- DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
Mov(code_reg, Operand(CodeObject()));
Push(lr, fp, type_reg, code_reg);
- Add(fp, jssp, InternalFrameConstants::kFixedFrameSizeFromFp);
- // jssp[4] : lr
- // jssp[3] : fp
- // jssp[1] : type
- // jssp[0] : [code object]
+ Add(fp, StackPointer(), InternalFrameConstants::kFixedFrameSizeFromFp);
+ // sp[4] : lr
+ // sp[3] : fp
+ // sp[1] : type
+ // sp[0] : [code object]
} else if (type == StackFrame::WASM_COMPILED) {
DCHECK(csp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
@@ -2502,7 +2431,6 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// csp[0] : for alignment
} else {
DCHECK_EQ(type, StackFrame::CONSTRUCT);
- DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
// Users of this frame type push a context pointer after the type field,
@@ -2511,11 +2439,12 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// The context pointer isn't part of the fixed frame, so add an extra slot
// to account for it.
- Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
- // jssp[3] : lr
- // jssp[2] : fp
- // jssp[1] : type
- // jssp[0] : cp
+ Add(fp, StackPointer(),
+ TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+ // sp[3] : lr
+ // sp[2] : fp
+ // sp[1] : type
+ // sp[0] : cp
}
}
@@ -2526,10 +2455,9 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
AssertStackConsistency();
Pop(fp, lr);
} else {
- DCHECK(jssp.Is(StackPointer()));
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer and return address.
- Mov(jssp, fp);
+ Mov(StackPointer(), fp);
AssertStackConsistency();
Pop(fp, lr);
}
@@ -2560,7 +2488,6 @@ void MacroAssembler::ExitFrameRestoreFPRegs() {
void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
int extra_space,
StackFrame::Type frame_type) {
- DCHECK(jssp.Is(StackPointer()));
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
@@ -2576,7 +2503,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// fp[-8]: STUB marker
// fp[-16]: Space reserved for SPOffset.
// fp[-24]: CodeObject()
- // jssp -> fp[-32]: padding
+ // sp -> fp[-32]: padding
STATIC_ASSERT((2 * kPointerSize) == ExitFrameConstants::kCallerSPOffset);
STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
@@ -2610,23 +2537,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// fp[-16]: Space reserved for SPOffset.
// fp[-24]: CodeObject()
// fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
- // jssp[8]: Extra space reserved for caller (if extra_space != 0).
- // jssp -> jssp[0]: Space reserved for the return address.
+ // sp[8]: Extra space reserved for caller (if extra_space != 0).
+ // sp -> sp[0]: Space reserved for the return address.
- // Align and synchronize the system stack pointer with jssp.
- AlignAndSetCSPForFrame();
DCHECK(csp.Is(StackPointer()));
- // fp[8]: CallerPC (lr)
- // fp -> fp[0]: CallerFP (old fp)
- // fp[-8]: STUB marker
- // fp[-16]: Space reserved for SPOffset.
- // fp[-24]: CodeObject()
- // fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
- // csp[8]: Memory reserved for the caller if extra_space != 0.
- // Alignment padding, if necessary.
- // csp -> csp[0]: Space reserved for the return address.
-
// ExitFrame::GetStateForFramePointer expects to find the return address at
// the memory address immediately below the pointer stored in SPOffset.
// It is not safe to derive much else from SPOffset, because the size of the
@@ -2638,7 +2553,8 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// Leave the current exit frame.
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
- const Register& scratch) {
+ const Register& scratch,
+ const Register& scratch2) {
DCHECK(csp.Is(StackPointer()));
if (restore_doubles) {
@@ -2652,9 +2568,10 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
if (emit_debug_code()) {
// Also emit debug code to clear the cp in the top frame.
+ Mov(scratch2, Operand(Context::kInvalidContext));
Mov(scratch, Operand(ExternalReference(IsolateAddressId::kContextAddress,
isolate())));
- Str(xzr, MemOperand(scratch));
+ Str(scratch2, MemOperand(scratch));
}
// Clear the frame pointer from the top frame.
Mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
@@ -2665,8 +2582,7 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[...]: The rest of the frame.
- Mov(jssp, fp);
- SetStackPointer(jssp);
+ Mov(csp, fp);
AssertStackConsistency();
Pop(fp, lr);
}
@@ -2830,14 +2746,12 @@ void MacroAssembler::PushSafepointRegisters() {
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// Make sure the safepoint registers list is what we expect.
- DCHECK_EQ(CPURegList::GetSafepointSavedRegisters().list(), 0x6ffcffff);
+ DCHECK_EQ(CPURegList::GetSafepointSavedRegisters().list(), 0x6FFCFFFF);
// Safepoint registers are stored contiguously on the stack, but not all the
// registers are saved. The following registers are excluded:
// - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
// the macro assembler.
- // - x28 (jssp) because JS stack pointer doesn't need to be included in
- // safepoint registers.
// - x31 (csp) because the system stack pointer doesn't need to be included
// in safepoint registers.
//
@@ -2845,12 +2759,9 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// safepoint register slots.
if ((reg_code >= 0) && (reg_code <= 15)) {
return reg_code;
- } else if ((reg_code >= 18) && (reg_code <= 27)) {
+ } else if ((reg_code >= 18) && (reg_code <= 30)) {
// Skip ip0 and ip1.
return reg_code - 2;
- } else if ((reg_code == 29) || (reg_code == 30)) {
- // Also skip jssp.
- return reg_code - 3;
} else {
// This register has no safepoint register slot.
UNREACHABLE();
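
With jssp gone, the safepoint slot mapping becomes a straight two-range lookup. A standalone sketch of the revised mapping (not V8 code; -1 stands in for UNREACHABLE):

    int SafepointSlotSketch(int reg_code) {
      if (reg_code >= 0 && reg_code <= 15) return reg_code;       // x0-x15  -> slots 0-15
      if (reg_code >= 18 && reg_code <= 30) return reg_code - 2;  // x18-x30 -> slots 16-28
      return -1;  // x16/x17 (ip0/ip1) and x31 (csp) have no safepoint slot
    }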
@@ -2909,7 +2820,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label ok;
Tst(scratch, kPointerSize - 1);
B(eq, &ok);
- Abort(kUnalignedCellInWriteBarrier);
+ Abort(AbortReason::kUnalignedCellInWriteBarrier);
Bind(&ok);
}
@@ -2975,11 +2886,9 @@ void TurboAssembler::CallRecordWriteStub(
Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kFPMode));
- Push(object);
- Push(address);
+ Push(object, address);
- Pop(slot_parameter);
- Pop(object_parameter);
+ Pop(slot_parameter, object_parameter);
Mov(isolate_parameter, ExternalReference::isolate_address(isolate()));
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
@@ -3008,7 +2917,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
Ldr(temp, MemOperand(address));
Cmp(temp, value);
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
// First, check if a write barrier is even needed. The tests below
@@ -3052,7 +2961,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
}
}
-void TurboAssembler::Assert(Condition cond, BailoutReason reason) {
+void TurboAssembler::Assert(Condition cond, AbortReason reason) {
if (emit_debug_code()) {
Check(cond, reason);
}
@@ -3060,14 +2969,14 @@ void TurboAssembler::Assert(Condition cond, BailoutReason reason) {
void MacroAssembler::AssertRegisterIsRoot(Register reg,
Heap::RootListIndex index,
- BailoutReason reason) {
+ AbortReason reason) {
if (emit_debug_code()) {
CompareRoot(reg, index);
Check(eq, reason);
}
}
-void TurboAssembler::Check(Condition cond, BailoutReason reason) {
+void TurboAssembler::Check(Condition cond, AbortReason reason) {
Label ok;
B(cond, &ok);
Abort(reason);
@@ -3075,10 +2984,10 @@ void TurboAssembler::Check(Condition cond, BailoutReason reason) {
Bind(&ok);
}
-void TurboAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(AbortReason reason) {
#ifdef DEBUG
RecordComment("Abort message: ");
- RecordComment(GetBailoutReason(reason));
+ RecordComment(GetAbortReason(reason));
if (FLAG_trap_on_abort) {
Brk(0);
@@ -3086,13 +2995,6 @@ void TurboAssembler::Abort(BailoutReason reason) {
}
#endif
- // Abort is used in some contexts where csp is the stack pointer. In order to
- // simplify the CallRuntime code, make sure that jssp is the stack pointer.
- // There is no risk of register corruption here because Abort doesn't return.
- Register old_stack_pointer = StackPointer();
- SetStackPointer(jssp);
- Mov(jssp, old_stack_pointer);
-
// We need some scratch registers for the MacroAssembler, so make sure we have
// some. This is safe here because Abort never returns.
RegList old_tmp_list = TmpList()->list();
@@ -3128,11 +3030,10 @@ void TurboAssembler::Abort(BailoutReason reason) {
{
BlockPoolsScope scope(this);
Bind(&msg_address);
- EmitStringData(GetBailoutReason(reason));
+ EmitStringData(GetAbortReason(reason));
}
}
- SetStackPointer(old_stack_pointer);
TmpList()->set_list(old_tmp_list);
}
@@ -3266,7 +3167,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
// We don't pass any arguments on the stack, but we still need to align the C
// stack pointer to a 16-byte boundary for PCS compliance.
if (!csp.Is(StackPointer())) {
- Bic(csp, StackPointer(), 0xf);
+ Bic(csp, StackPointer(), 0xF);
}
CallPrintf(arg_count, pcs);
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 035558fd81..47c08f2622 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -216,12 +216,6 @@ class TurboAssembler : public Assembler {
bool allow_macro_instructions() const { return allow_macro_instructions_; }
#endif
- // Set the current stack pointer, but don't generate any code.
- inline void SetStackPointer(const Register& stack_pointer) {
- DCHECK(!TmpList()->IncludesAliasOf(stack_pointer));
- sp_ = stack_pointer;
- }
-
// Activation support.
void EnterFrame(StackFrame::Type type);
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
@@ -574,17 +568,18 @@ class TurboAssembler : public Assembler {
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cond, BailoutReason reason);
+ void Assert(Condition cond, AbortReason reason);
- void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);
+ void AssertSmi(Register object,
+ AbortReason reason = AbortReason::kOperandIsNotASmi);
// Like Assert(), but always enabled.
- void Check(Condition cond, BailoutReason reason);
+ void Check(Condition cond, AbortReason reason);
inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
// Print a message to stderr and abort execution.
- void Abort(BailoutReason reason);
+ void Abort(AbortReason reason);
// If emit_debug_code() is true, emit a run-time check to ensure that
// StackPointer() does not point below the system stack pointer.
@@ -619,8 +614,8 @@ class TurboAssembler : public Assembler {
static CPURegList DefaultTmpList();
static CPURegList DefaultFPTmpList();
- // Return the current stack pointer, as set by SetStackPointer.
- inline const Register& StackPointer() const { return sp_; }
+ // Return the stack pointer.
+ inline const Register& StackPointer() const { return csp; }
// Move macros.
inline void Mvn(const Register& rd, uint64_t imm);
@@ -711,25 +706,22 @@ class TurboAssembler : public Assembler {
inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
inline void Drop(const Register& count, uint64_t unit_size = kXRegSize);
- // Drop arguments from stack without actually accessing memory.
- // This will currently drop 'count' arguments from the stack.
+ // Drop 'count' arguments from the stack, rounded up to a multiple of two,
+ // without actually accessing memory.
// We assume the size of the arguments is the pointer size.
// An optional mode argument is passed, which can indicate we need to
// explicitly add the receiver to the count.
- // TODO(arm64): Update this to round up the number of bytes dropped to
- // a multiple of 16, so that we can remove jssp.
enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
inline void DropArguments(const Register& count,
ArgumentsCountMode mode = kCountIncludesReceiver);
+ inline void DropArguments(int64_t count,
+ ArgumentsCountMode mode = kCountIncludesReceiver);
- // Drop slots from stack without actually accessing memory.
- // This will currently drop 'count' slots of the given size from the stack.
- // TODO(arm64): Update this to round up the number of bytes dropped to
- // a multiple of 16, so that we can remove jssp.
- inline void DropSlots(int64_t count, uint64_t unit_size = kXRegSize);
+ // Drop 'count' slots from stack, rounded up to a multiple of two, without
+ // actually accessing memory.
+ inline void DropSlots(int64_t count);
- // Push a single argument to the stack.
- // TODO(arm64): Update this to push a padding slot above the argument.
+ // Push a single argument, with padding, to the stack.
inline void PushArgument(const Register& arg);
// Re-synchronizes the system stack pointer (csp) with the current stack
@@ -769,8 +761,7 @@ class TurboAssembler : public Assembler {
LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION
- // Push or pop up to 4 registers of the same width to or from the stack,
- // using the current stack pointer as set by SetStackPointer.
+ // Push or pop up to 4 registers of the same width to or from the stack.
//
// If an argument register is 'NoReg', all further arguments are also assumed
// to be 'NoReg', and are thus not pushed or popped.
@@ -784,9 +775,8 @@ class TurboAssembler : public Assembler {
// It is not valid to pop into the same register more than once in one
// operation, not even into the zero register.
//
- // If the current stack pointer (as set by SetStackPointer) is csp, then it
- // must be aligned to 16 bytes on entry and the total size of the specified
- // registers must also be a multiple of 16 bytes.
+ // The stack pointer must be aligned to 16 bytes on entry and the total size
+ // of the specified registers must also be a multiple of 16 bytes.
//
// Even if the current stack pointer is not the system stack pointer (csp),
// Push (and derived methods) will still modify the system stack pointer in
@@ -1291,9 +1281,6 @@ class TurboAssembler : public Assembler {
CPURegList tmp_list_;
CPURegList fptmp_list_;
- // The register to use as a stack pointer for stack operations.
- Register sp_;
-
bool use_real_aborts_;
// Helps resolve branching to labels potentially out of range.
@@ -1707,10 +1694,6 @@ class MacroAssembler : public TurboAssembler {
//
// Note that registers are not checked for invalid values. Use this method
// only if you know that the GC won't try to examine the values on the stack.
- //
- // This method must not be called unless the current stack pointer (as set by
- // SetStackPointer) is the system stack pointer (csp), and is aligned to
- // ActivationFrameAlignment().
void PushCalleeSavedRegisters();
// Restore the callee-saved registers (as defined by AAPCS64).
@@ -1719,10 +1702,6 @@ class MacroAssembler : public TurboAssembler {
// thus come from higher addresses.
// Floating-point registers are popped after general-purpose registers, and
// thus come from higher addresses.
- //
- // This method must not be called unless the current stack pointer (as set by
- // SetStackPointer) is the system stack pointer (csp), and is aligned to
- // ActivationFrameAlignment().
void PopCalleeSavedRegisters();
// Align csp for a frame, as per ActivationFrameAlignment, and make it the
@@ -1752,10 +1731,6 @@ class MacroAssembler : public TurboAssembler {
inline void SmiUntagToDouble(VRegister dst, Register src);
inline void SmiUntagToFloat(VRegister dst, Register src);
- // Tag and push in one step.
- inline void SmiTagAndPush(Register src);
- inline void SmiTagAndPush(Register src1, Register src2);
-
inline void JumpIfNotSmi(Register value, Label* not_smi_label);
inline void JumpIfBothSmi(Register value1, Register value2,
Label* both_smi_label,
@@ -1771,7 +1746,8 @@ class MacroAssembler : public TurboAssembler {
Label* not_smi_label);
// Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
+ void AssertNotSmi(Register object,
+ AbortReason reason = AbortReason::kOperandIsASmi);
inline void ObjectTag(Register tagged_obj, Register obj);
inline void ObjectUntag(Register untagged_obj, Register obj);
@@ -1948,19 +1924,14 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Frames.
- // The stack pointer has to switch between csp and jssp when setting up and
- // destroying the exit frame. Hence preserving/restoring the registers is
- // slightly more complicated than simple push/pop operations.
void ExitFramePreserveFPRegs();
void ExitFrameRestoreFPRegs();
// Enter exit frame. Exit frames are used when calling C code from generated
// (JavaScript) code.
//
- // The stack pointer must be jssp on entry, and will be set to csp by this
- // function. The frame pointer is also configured, but the only other
- // registers modified by this function are the provided scratch register, and
- // jssp.
+ // The only registers modified by this function are the provided scratch
+ // register, the frame pointer and the stack pointer.
//
// The 'extra_space' argument can be used to allocate some space in the exit
// frame that will be ignored by the GC. This space will be reserved in the
@@ -1989,10 +1960,10 @@ class MacroAssembler : public TurboAssembler {
// * Preserved doubles are restored (if restore_doubles is true).
// * The frame information is removed from the top frame.
// * The exit frame is dropped.
- // * The stack pointer is reset to jssp.
//
// The stack pointer must be csp on entry.
- void LeaveExitFrame(bool save_doubles, const Register& scratch);
+ void LeaveExitFrame(bool save_doubles, const Register& scratch,
+ const Register& scratch2);
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
@@ -2042,9 +2013,8 @@ class MacroAssembler : public TurboAssembler {
// Debugging.
void AssertRegisterIsRoot(
- Register reg,
- Heap::RootListIndex index,
- BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
+ Register reg, Heap::RootListIndex index,
+ AbortReason reason = AbortReason::kRegisterDidNotMatchExpectedRoot);
// Abort if the specified register contains the invalid color bit pattern.
// The pattern must be in bits [1:0] of 'reg' register.
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index c01741c31e..d0c464dfbe 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -98,13 +98,6 @@ SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
}
-void Simulator::Initialize(Isolate* isolate) {
- if (isolate->simulator_initialized()) return;
- isolate->set_simulator_initialized(true);
- ExternalReference::set_redirector(isolate, &RedirectExternalReference);
-}
-
-
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
Isolate::PerIsolateThreadData* isolate_data =
@@ -124,8 +117,7 @@ Simulator* Simulator::current(Isolate* isolate) {
return sim;
}
-
-void Simulator::CallVoid(byte* entry, CallArgument* args) {
+void Simulator::CallImpl(byte* entry, CallArgument* args) {
int index_x = 0;
int index_d = 0;
@@ -167,63 +159,6 @@ void Simulator::CallVoid(byte* entry, CallArgument* args) {
set_sp(original_stack);
}
-
-int64_t Simulator::CallInt64(byte* entry, CallArgument* args) {
- CallVoid(entry, args);
- return xreg(0);
-}
-
-
-double Simulator::CallDouble(byte* entry, CallArgument* args) {
- CallVoid(entry, args);
- return dreg(0);
-}
-
-
-int64_t Simulator::CallJS(byte* entry,
- Object* new_target,
- Object* target,
- Object* revc,
- int64_t argc,
- Object*** argv) {
- CallArgument args[] = {
- CallArgument(new_target),
- CallArgument(target),
- CallArgument(revc),
- CallArgument(argc),
- CallArgument(argv),
- CallArgument::End()
- };
- return CallInt64(entry, args);
-}
-
-
-int64_t Simulator::CallRegExp(byte* entry,
- String* input,
- int64_t start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- int64_t output_size,
- Address stack_base,
- int64_t direct_call,
- Isolate* isolate) {
- CallArgument args[] = {
- CallArgument(input),
- CallArgument(start_offset),
- CallArgument(input_start),
- CallArgument(input_end),
- CallArgument(output),
- CallArgument(output_size),
- CallArgument(stack_base),
- CallArgument(direct_call),
- CallArgument(isolate),
- CallArgument::End()
- };
- return CallInt64(entry, args);
-}
-
-
void Simulator::CheckPCSComplianceAndRun() {
// Adjust JS-based stack limit to C-based stack limit.
isolate_->stack_guard()->AdjustStackLimitForSimulator();
@@ -350,6 +285,11 @@ uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
return stack_limit_ + 1024;
}
+void Simulator::SetRedirectInstruction(Instruction* instruction) {
+ instruction->SetInstructionBits(
+ HLT | Assembler::ImmException(kImmExceptionIsRedirectedCall));
+}
+
Simulator::Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
Isolate* isolate, FILE* stream)
: decoder_(decoder),
@@ -392,7 +332,7 @@ void Simulator::Init(FILE* stream) {
stack_limit_ = stack_ + stack_protection_size_;
uintptr_t tos = stack_ + stack_size_ - stack_protection_size_;
// The stack pointer must be 16-byte aligned.
- set_sp(tos & ~0xfUL);
+ set_sp(tos & ~0xFUL);
stream_ = stream;
print_disasm_ = new PrintDisassembler(stream_);
@@ -412,11 +352,11 @@ void Simulator::ResetState() {
// Reset registers to 0.
pc_ = nullptr;
for (unsigned i = 0; i < kNumberOfRegisters; i++) {
- set_xreg(i, 0xbadbeef);
+ set_xreg(i, 0xBADBEEF);
}
for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
// Set FP registers to a value that is NaN in both 32-bit and 64-bit FP.
- set_dreg_bits(i, 0x7ff000007f800001UL);
+ set_dreg_bits(i, 0x7FF000007F800001UL);
}
// Returning to address 0 exits the Simulator.
set_lr(kEndOfSimAddress);
@@ -458,82 +398,6 @@ void Simulator::RunFrom(Instruction* start) {
}
-// When the generated code calls an external reference we need to catch that in
-// the simulator. The external reference will be a function compiled for the
-// host architecture. We need to call that function instead of trying to
-// execute it with the simulator. We do that by redirecting the external
-// reference to a svc (Supervisor Call) instruction that is handled by
-// the simulator. We write the original destination of the jump just at a known
-// offset from the svc instruction so the simulator knows what to call.
-class Redirection {
- public:
- Redirection(Isolate* isolate, void* external_function,
- ExternalReference::Type type)
- : external_function_(external_function), type_(type), next_(nullptr) {
- redirect_call_.SetInstructionBits(
- HLT | Assembler::ImmException(kImmExceptionIsRedirectedCall));
- next_ = isolate->simulator_redirection();
- // TODO(all): Simulator flush I cache
- isolate->set_simulator_redirection(this);
- }
-
- void* address_of_redirect_call() {
- return reinterpret_cast<void*>(&redirect_call_);
- }
-
- template <typename T>
- T external_function() { return reinterpret_cast<T>(external_function_); }
-
- ExternalReference::Type type() { return type_; }
-
- static Redirection* Get(Isolate* isolate, void* external_function,
- ExternalReference::Type type) {
- Redirection* current = isolate->simulator_redirection();
- for (; current != nullptr; current = current->next_) {
- if (current->external_function_ == external_function &&
- current->type_ == type) {
- return current;
- }
- }
- return new Redirection(isolate, external_function, type);
- }
-
- static Redirection* FromHltInstruction(Instruction* redirect_call) {
- char* addr_of_hlt = reinterpret_cast<char*>(redirect_call);
- char* addr_of_redirection =
- addr_of_hlt - offsetof(Redirection, redirect_call_);
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- static void* ReverseRedirection(int64_t reg) {
- Redirection* redirection =
- FromHltInstruction(reinterpret_cast<Instruction*>(reg));
- return redirection->external_function<void*>();
- }
-
- static void DeleteChain(Redirection* redirection) {
- while (redirection != nullptr) {
- Redirection* next = redirection->next_;
- delete redirection;
- redirection = next;
- }
- }
-
- private:
- void* external_function_;
- Instruction redirect_call_;
- ExternalReference::Type type_;
- Redirection* next_;
-};
-
-
-// static
-void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
- Redirection* first) {
- Redirection::DeleteChain(first);
-}
-
-
// Calls into the V8 runtime are based on this very simple interface.
// Note: To be able to return two values from some calls the code in runtime.cc
// uses the ObjectPair structure.
@@ -561,20 +425,20 @@ typedef void (*SimulatorRuntimeProfilingGetterCall)(int64_t arg0, int64_t arg1,
void* arg2);
void Simulator::DoRuntimeCall(Instruction* instr) {
- Redirection* redirection = Redirection::FromHltInstruction(instr);
+ Redirection* redirection = Redirection::FromInstruction(instr);
// The called C code might itself call simulated code, so any
// caller-saved registers (including lr) could still be clobbered by a
// redirected call.
Instruction* return_address = lr();
- int64_t external = redirection->external_function<int64_t>();
+ int64_t external =
+ reinterpret_cast<int64_t>(redirection->external_function());
- TraceSim("Call to host function at %p\n",
- redirection->external_function<void*>());
+ TraceSim("Call to host function at %p\n", redirection->external_function());
// SP must be 16-byte-aligned at the call interface.
- bool stack_alignment_exception = ((sp() & 0xf) != 0);
+ bool stack_alignment_exception = ((sp() & 0xF) != 0);
if (stack_alignment_exception) {
TraceSim(" with unaligned stack 0x%016" PRIx64 ".\n", sp());
FATAL("ALIGNMENT EXCEPTION");
@@ -761,28 +625,17 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
set_pc(return_address);
}
-
-void* Simulator::RedirectExternalReference(Isolate* isolate,
- void* external_function,
- ExternalReference::Type type) {
- base::LockGuard<base::Mutex> lock_guard(
- isolate->simulator_redirection_mutex());
- Redirection* redirection = Redirection::Get(isolate, external_function, type);
- return redirection->address_of_redirect_call();
-}
-
-
const char* Simulator::xreg_names[] = {
-"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
-"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
-"ip0", "ip1", "x18", "x19", "x20", "x21", "x22", "x23",
-"x24", "x25", "x26", "cp", "jssp", "fp", "lr", "xzr", "csp"};
+ "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8",
+ "x9", "x10", "x11", "x12", "x13", "x14", "x15", "ip0", "ip1",
+ "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
+ "cp", "x28", "fp", "lr", "xzr", "csp"};
const char* Simulator::wreg_names[] = {
-"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7",
-"w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15",
-"w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23",
-"w24", "w25", "w26", "wcp", "wjssp", "wfp", "wlr", "wzr", "wcsp"};
+ "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8",
+ "w9", "w10", "w11", "w12", "w13", "w14", "w15", "w16", "w17",
+ "w18", "w19", "w20", "w21", "w22", "w23", "w24", "w25", "w26",
+ "wcp", "w28", "wfp", "wlr", "wzr", "wcsp"};
const char* Simulator::sreg_names[] = {
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
@@ -1294,9 +1147,9 @@ void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) {
// a floating-point interpretation or a memory access annotation).
void Simulator::PrintVRegisterRawHelper(unsigned code, int bytes, int lsb) {
// The template for vector types:
- // "# v{code}: 0xffeeddccbbaa99887766554433221100".
+ // "# v{code}: 0xFFEEDDCCBBAA99887766554433221100".
// An example with bytes=4 and lsb=8:
- // "# v{code}: 0xbbaa9988 ".
+ // "# v{code}: 0xBBAA9988 ".
fprintf(stream_, "# %s%5s: %s", clr_vreg_name, VRegNameForCode(code),
clr_vreg_value);
@@ -1393,8 +1246,8 @@ void Simulator::PrintVRegisterFPHelper(unsigned code,
void Simulator::PrintRegisterRawHelper(unsigned code, Reg31Mode r31mode,
int size_in_bytes) {
// The template for all supported sizes.
- // "# x{code}: 0xffeeddccbbaa9988"
- // "# w{code}: 0xbbaa9988"
+ // "# x{code}: 0xFFEEDDCCBBAA9988"
+ // "# w{code}: 0xBBAA9988"
// "# w{code}<15:0>: 0x9988"
// "# w{code}<7:0>: 0x88"
unsigned padding_chars = (kXRegSize - size_in_bytes) * 2;
@@ -2367,8 +2220,8 @@ void Simulator::VisitMoveWideImmediate(Instruction* instr) {
unsigned reg_code = instr->Rd();
int64_t prev_xn_val = is_64_bits ? xreg(reg_code)
: wreg(reg_code);
- new_xn_val = (prev_xn_val & ~(0xffffL << shift)) | shifted_imm16;
- break;
+ new_xn_val = (prev_xn_val & ~(0xFFFFL << shift)) | shifted_imm16;
+ break;
}
case MOVZ_w:
case MOVZ_x: {
@@ -2532,14 +2385,14 @@ static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
uint64_t u0, v0, w0;
int64_t u1, v1, w1, w2, t;
- u0 = u & 0xffffffffL;
+ u0 = u & 0xFFFFFFFFL;
u1 = u >> 32;
- v0 = v & 0xffffffffL;
+ v0 = v & 0xFFFFFFFFL;
v1 = v >> 32;
w0 = u0 * v0;
t = u1 * v0 + (w0 >> 32);
- w1 = t & 0xffffffffL;
+ w1 = t & 0xFFFFFFFFL;
w2 = t >> 32;
w1 = u0 * v1 + w1;
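
The hunk above only touches the hex constants, but the surrounding routine is the standard high-multiply via a 32-bit split. A self-contained sketch of that technique (not this patch's code; like the V8 version it assumes two's-complement arithmetic shifts):

    #include <cstdint>

    int64_t MulHighSignedSketch(int64_t u, int64_t v) {
      uint64_t u0 = u & 0xFFFFFFFFull;  // low halves, zero-extended
      uint64_t v0 = v & 0xFFFFFFFFull;
      int64_t u1 = u >> 32;             // high halves, sign-extended
      int64_t v1 = v >> 32;

      uint64_t w0 = u0 * v0;
      int64_t t = u1 * v0 + (w0 >> 32);
      int64_t w1 = t & 0xFFFFFFFFull;
      int64_t w2 = t >> 32;
      w1 = u0 * v1 + w1;
      // Only the high 64 bits of the 128-bit product are returned.
      return u1 * v1 + w2 + (w1 >> 32);
    }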
@@ -3344,7 +3197,7 @@ void Simulator::Debug() {
int next_arg = 1;
if (strcmp(cmd, "stack") == 0) {
- cur = reinterpret_cast<int64_t*>(jssp());
+ cur = reinterpret_cast<int64_t*>(sp());
} else { // "mem"
int64_t value;
@@ -3381,7 +3234,7 @@ void Simulator::Debug() {
PrintF(" (");
if ((value & kSmiTagMask) == 0) {
STATIC_ASSERT(kSmiValueSize == 32);
- int32_t untagged = (value >> kSmiShift) & 0xffffffff;
+ int32_t untagged = (value >> kSmiShift) & 0xFFFFFFFF;
PrintF("smi %" PRId32, untagged);
} else {
obj->ShortPrint();
@@ -4344,7 +4197,7 @@ void Simulator::VisitNEONByIndexedElement(Instruction* instr) {
int rm_reg = instr->Rm();
int index = (instr->NEONH() << 1) | instr->NEONL();
if (instr->NEONSize() == 1) {
- rm_reg &= 0xf;
+ rm_reg &= 0xF;
index = (index << 1) | instr->NEONM();
}
@@ -4909,9 +4762,9 @@ void Simulator::VisitNEONModifiedImmediate(Instruction* instr) {
case 0x6:
vform = (q == 1) ? kFormat4S : kFormat2S;
if (cmode_0 == 0) {
- imm = imm8 << 8 | 0x000000ff;
+ imm = imm8 << 8 | 0x000000FF;
} else {
- imm = imm8 << 16 | 0x0000ffff;
+ imm = imm8 << 16 | 0x0000FFFF;
}
break;
case 0x7:
@@ -4923,10 +4776,10 @@ void Simulator::VisitNEONModifiedImmediate(Instruction* instr) {
imm = 0;
for (int i = 0; i < 8; ++i) {
if (imm8 & (1 << i)) {
- imm |= (UINT64_C(0xff) << (8 * i));
+ imm |= (UINT64_C(0xFF) << (8 * i));
}
}
- } else { // cmode_0 == 1, cmode == 0xf.
+ } else { // cmode_0 == 1, cmode == 0xF.
if (op_bit == 0) {
vform = q ? kFormat4S : kFormat2S;
imm = bit_cast<uint32_t>(instr->ImmNEONFP32());
@@ -4934,7 +4787,7 @@ void Simulator::VisitNEONModifiedImmediate(Instruction* instr) {
vform = kFormat2D;
imm = bit_cast<uint64_t>(instr->ImmNEONFP64());
} else {
- DCHECK((q == 0) && (op_bit == 1) && (cmode == 0xf));
+ DCHECK((q == 0) && (op_bit == 1) && (cmode == 0xF));
VisitUnallocated(instr);
}
}
@@ -5278,7 +5131,7 @@ void Simulator::VisitNEONScalarByIndexedElement(Instruction* instr) {
int rm_reg = instr->Rm();
int index = (instr->NEONH() << 1) | instr->NEONL();
if (instr->NEONSize() == 1) {
- rm_reg &= 0xf;
+ rm_reg &= 0xF;
index = (index << 1) | instr->NEONM();
}
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index 0411c0bc96..a8f229d764 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -16,56 +16,13 @@
#include "src/assembler.h"
#include "src/base/compiler-specific.h"
#include "src/globals.h"
+#include "src/simulator-base.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
-#if !defined(USE_SIMULATOR)
-
-// Running without a simulator on a native ARM64 platform.
-// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-typedef int (*arm64_regexp_matcher)(String* input,
- int64_t start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- int64_t output_size,
- Address stack_base,
- int64_t direct_call,
- Isolate* isolate);
-
-// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type arm64_regexp_matcher.
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- (FUNCTION_CAST<arm64_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
- p8))
-
-// Running without a simulator there is nothing to do.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- USE(isolate);
- return c_limit;
- }
-
- static uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
- USE(isolate);
- return try_catch_address;
- }
-
- static void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
- USE(isolate);
- }
-};
-
-#else // !defined(USE_SIMULATOR)
+#if defined(USE_SIMULATOR)
// Assemble the specified IEEE-754 components into the target type and apply
// appropriate rounding.
@@ -269,6 +226,10 @@ T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
}
}
+class CachePage {
+ // TODO(all): Simulate instruction cache.
+};
+
// Representation of memory, with typed getters and setters for access.
class SimMemory {
public:
@@ -680,8 +641,11 @@ class LogicVRegister {
bool round_[kQRegSize];
};
-class Simulator : public DecoderVisitor {
+// Using multiple inheritance here is permitted because {DecoderVisitor} is a
+// pure interface class with only pure virtual methods.
+class Simulator : public DecoderVisitor, public SimulatorBase {
public:
+ static void SetRedirectInstruction(Instruction* instruction);
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size) {
USE(i_cache);
@@ -696,42 +660,7 @@ class Simulator : public DecoderVisitor {
// System functions.
- static void Initialize(Isolate* isolate);
-
- static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
-
- static Simulator* current(v8::internal::Isolate* isolate);
-
- class CallArgument;
-
- // Call an arbitrary function taking an arbitrary number of arguments. The
- // varargs list must be a set of arguments with type CallArgument, and
- // terminated by CallArgument::End().
- void CallVoid(byte* entry, CallArgument* args);
-
- // Like CallVoid, but expect a return value.
- int64_t CallInt64(byte* entry, CallArgument* args);
- double CallDouble(byte* entry, CallArgument* args);
-
- // V8 calls into generated JS code with 5 parameters and into
- // generated RegExp code with 10 parameters. These are convenience functions,
- // which set up the simulator state and grab the result on return.
- int64_t CallJS(byte* entry,
- Object* new_target,
- Object* target,
- Object* revc,
- int64_t argc,
- Object*** argv);
- int64_t CallRegExp(byte* entry,
- String* input,
- int64_t start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- int64_t output_size,
- Address stack_base,
- int64_t direct_call,
- Isolate* isolate);
+ V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);
// A wrapper class that stores an argument for one of the above Call
// functions.
@@ -787,6 +716,14 @@ class Simulator : public DecoderVisitor {
CallArgument() { type_ = NO_ARG; }
};
+ // Call an arbitrary function taking an arbitrary number of arguments.
+ template <typename Return, typename... Args>
+ Return Call(byte* entry, Args... args) {
+ // Convert all arguments to CallArgument.
+ CallArgument call_args[] = {CallArgument(args)..., CallArgument::End()};
+ CallImpl(entry, call_args);
+ return ReadReturn<Return>();
+ }
// Start the debugging command line.
void Debug();
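
The new templated Call expands the argument pack into a CallArgument array terminated by End(). A minimal standalone sketch of that idiom with illustrative names (not V8's types):

    #include <cstdio>

    struct Arg {
      long bits;
      bool is_end;
      explicit Arg(long v) : bits(v), is_end(false) {}
      static Arg End() { return Arg(); }

     private:
      Arg() : bits(0), is_end(true) {}
    };

    template <typename... Args>
    void CallSketch(Args... args) {
      Arg call_args[] = {Arg(args)..., Arg::End()};  // pack expansion + sentinel
      for (const Arg& a : call_args) {
        if (a.is_end) break;
        std::printf("arg: %ld\n", a.bits);
      }
    }

    // Usage: CallSketch(1L, 2L, 3L) prints each argument before hitting End().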
@@ -806,10 +743,6 @@ class Simulator : public DecoderVisitor {
void ResetState();
- // Runtime call support. Uses the isolate in a thread-safe way.
- static void* RedirectExternalReference(Isolate* isolate,
- void* external_function,
- ExternalReference::Type type);
void DoRuntimeCall(Instruction* instr);
// Run the simulator.
@@ -958,7 +891,6 @@ class Simulator : public DecoderVisitor {
inline SimVRegister& vreg(unsigned code) { return vregisters_[code]; }
int64_t sp() { return xreg(31, Reg31IsStackPointer); }
- int64_t jssp() { return xreg(kJSSPCode, Reg31IsStackPointer); }
int64_t fp() {
return xreg(kFramePointerRegCode, Reg31IsStackPointer);
}
@@ -2345,6 +2277,21 @@ class Simulator : public DecoderVisitor {
private:
void Init(FILE* stream);
+ V8_EXPORT_PRIVATE void CallImpl(byte* entry, CallArgument* args);
+
+ // Read floating point return values.
+ template <typename T>
+ typename std::enable_if<std::is_floating_point<T>::value, T>::type
+ ReadReturn() {
+ return static_cast<T>(dreg(0));
+ }
+ // Read non-float return values.
+ template <typename T>
+ typename std::enable_if<!std::is_floating_point<T>::value, T>::type
+ ReadReturn() {
+ return ConvertReturn<T>(xreg(0));
+ }
+
template <typename T>
static T FPDefaultNaN();
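
ReadReturn uses enable_if to pick which register carries the return value. A sketch of the same dispatch pattern with illustrative names (not the simulator's code):

    #include <type_traits>

    // Floating-point results are read from the first FP register (d0 here).
    template <typename T>
    typename std::enable_if<std::is_floating_point<T>::value, T>::type
    ReadReturnSketch(double d0, long x0) {
      return static_cast<T>(d0);
    }

    // Everything else is read from the first general-purpose register (x0 here).
    template <typename T>
    typename std::enable_if<!std::is_floating_point<T>::value, T>::type
    ReadReturnSketch(double d0, long x0) {
      return static_cast<T>(x0);
    }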
@@ -2407,40 +2354,7 @@ inline float Simulator::FPDefaultNaN<float>() {
return kFP32DefaultNaN;
}
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(isolate)->CallJS( \
- FUNCTION_ADDR(entry), p0, p1, p2, p3, p4))
-
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- static_cast<int>(Simulator::current(isolate)->CallRegExp( \
- entry, p0, p1, p2, p3, p4, p5, p6, p7, p8))
-
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. The JS-based limit normally points near the end of
-// the simulator stack. When the C-based limit is exhausted we reflect that by
-// lowering the JS-based limit as well, to make stack checks trigger.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit(c_limit);
- }
-
- static uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(isolate);
- return sim->PushAddress(try_catch_address);
- }
-
- static void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
- Simulator::current(isolate)->PopAddress();
- }
-};
-
-#endif // !defined(USE_SIMULATOR)
+#endif // defined(USE_SIMULATOR)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm64/simulator-logic-arm64.cc b/deps/v8/src/arm64/simulator-logic-arm64.cc
index 03d1d37df9..9ee5ea6cc8 100644
--- a/deps/v8/src/arm64/simulator-logic-arm64.cc
+++ b/deps/v8/src/arm64/simulator-logic-arm64.cc
@@ -3986,9 +3986,9 @@ T Simulator::FPRecipEstimate(T op, FPRounding rounding) {
} else {
// Return FPMaxNormal(sign).
if (sizeof(T) == sizeof(float)) {
- return float_pack(sign, 0xfe, 0x07fffff);
+ return float_pack(sign, 0xFE, 0x07FFFFF);
} else {
- return double_pack(sign, 0x7fe, 0x0fffffffffffffl);
+ return double_pack(sign, 0x7FE, 0x0FFFFFFFFFFFFFl);
}
}
} else {
diff --git a/deps/v8/src/arm64/utils-arm64.cc b/deps/v8/src/arm64/utils-arm64.cc
index 8ef8420001..f8804d8b93 100644
--- a/deps/v8/src/arm64/utils-arm64.cc
+++ b/deps/v8/src/arm64/utils-arm64.cc
@@ -98,7 +98,7 @@ int CountTrailingZeros(uint64_t value, int width) {
return static_cast<int>(base::bits::CountTrailingZeros64(value));
}
return static_cast<int>(base::bits::CountTrailingZeros32(
- static_cast<uint32_t>(value & 0xfffffffff)));
+ static_cast<uint32_t>(value & 0xFFFFFFFFF)));
}
@@ -108,7 +108,7 @@ int CountSetBits(uint64_t value, int width) {
return static_cast<int>(base::bits::CountPopulation(value));
}
return static_cast<int>(
- base::bits::CountPopulation(static_cast<uint32_t>(value & 0xfffffffff)));
+ base::bits::CountPopulation(static_cast<uint32_t>(value & 0xFFFFFFFFF)));
}
int LowestSetBitPosition(uint64_t value) {
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index c38c52220d..6be80bf7af 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -292,8 +292,7 @@ void AsmJsParser::Begin(AsmJsScanner::token_t label) {
void AsmJsParser::Loop(AsmJsScanner::token_t label) {
BareBegin(BlockKind::kLoop, label);
- int position = static_cast<int>(scanner_.Position());
- DCHECK_EQ(position, scanner_.Position());
+ size_t position = scanner_.Position();
current_function_builder_->AddAsmWasmOffset(position, position);
current_function_builder_->EmitWithU8(kExprLoop, kLocalVoid);
}
@@ -450,7 +449,7 @@ void AsmJsParser::ValidateModuleVar(bool mutable_variable) {
DeclareGlobal(info, mutable_variable, AsmType::Double(), kWasmF64,
WasmInitExpr(dvalue));
} else if (CheckForUnsigned(&uvalue)) {
- if (uvalue > 0x7fffffff) {
+ if (uvalue > 0x7FFFFFFF) {
FAIL("Numeric literal out of range");
}
DeclareGlobal(info, mutable_variable,
@@ -461,7 +460,7 @@ void AsmJsParser::ValidateModuleVar(bool mutable_variable) {
DeclareGlobal(info, mutable_variable, AsmType::Double(), kWasmF64,
WasmInitExpr(-dvalue));
} else if (CheckForUnsigned(&uvalue)) {
- if (uvalue > 0x7fffffff) {
+ if (uvalue > 0x7FFFFFFF) {
FAIL("Numeric literal out of range");
}
DeclareGlobal(info, mutable_variable,
@@ -742,8 +741,7 @@ void AsmJsParser::ValidateFunction() {
return_type_ = nullptr;
// Record start of the function, used as position for the stack check.
- int start_position = static_cast<int>(scanner_.Position());
- current_function_builder_->SetAsmFunctionStartPosition(start_position);
+ current_function_builder_->SetAsmFunctionStartPosition(scanner_.Position());
CachedVector<AsmType*> params(cached_asm_type_p_vectors_);
ValidateFunctionParams(&params);
@@ -902,7 +900,7 @@ void AsmJsParser::ValidateFunctionLocals(size_t param_count,
current_function_builder_->EmitF64Const(-dvalue);
current_function_builder_->EmitSetLocal(info->index);
} else if (CheckForUnsigned(&uvalue)) {
- if (uvalue > 0x7fffffff) {
+ if (uvalue > 0x7FFFFFFF) {
FAIL("Numeric literal out of range");
}
info->kind = VarKind::kLocal;
@@ -954,7 +952,7 @@ void AsmJsParser::ValidateFunctionLocals(size_t param_count,
current_function_builder_->EmitF32Const(dvalue);
current_function_builder_->EmitSetLocal(info->index);
} else if (CheckForUnsigned(&uvalue)) {
- if (uvalue > 0x7fffffff) {
+ if (uvalue > 0x7FFFFFFF) {
FAIL("Numeric literal out of range");
}
info->kind = VarKind::kLocal;
@@ -1337,7 +1335,7 @@ void AsmJsParser::ValidateCase() {
FAIL("Expected numeric literal");
}
// TODO(bradnelson): Share negation plumbing.
- if ((negate && uvalue > 0x80000000) || (!negate && uvalue > 0x7fffffff)) {
+ if ((negate && uvalue > 0x80000000) || (!negate && uvalue > 0x7FFFFFFF)) {
FAIL("Numeric literal out of range");
}
int32_t value = static_cast<int32_t>(uvalue);
@@ -1398,11 +1396,11 @@ AsmType* AsmJsParser::NumericLiteral() {
current_function_builder_->EmitF64Const(dvalue);
return AsmType::Double();
} else if (CheckForUnsigned(&uvalue)) {
- if (uvalue <= 0x7fffffff) {
+ if (uvalue <= 0x7FFFFFFF) {
current_function_builder_->EmitI32Const(static_cast<int32_t>(uvalue));
return AsmType::FixNum();
} else {
- DCHECK_LE(uvalue, 0xffffffff);
+ DCHECK_LE(uvalue, 0xFFFFFFFF);
current_function_builder_->EmitI32Const(static_cast<int32_t>(uvalue));
return AsmType::Unsigned();
}
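
Literals above 0x7FFFFFFF are still emitted as a single i32 constant; only the asm.js type changes from FixNum to Unsigned. A short illustration (not parser code) of why the same bit pattern works:

    #include <cstdint>

    int32_t EncodeLiteral(uint32_t literal) {
      // e.g. 0xFFFFFFFF encodes as the i32 bit pattern of -1.
      return static_cast<int32_t>(literal);
    }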
@@ -1553,7 +1551,7 @@ AsmType* AsmJsParser::UnaryExpression() {
if (Check('-')) {
uint32_t uvalue;
if (CheckForUnsigned(&uvalue)) {
- // TODO(bradnelson): was supposed to be 0x7fffffff, check errata.
+ // TODO(bradnelson): was supposed to be 0x7FFFFFFF, check errata.
if (uvalue <= 0x80000000) {
current_function_builder_->EmitI32Const(-static_cast<int32_t>(uvalue));
} else {
@@ -1621,7 +1619,7 @@ AsmType* AsmJsParser::UnaryExpression() {
if (!ret->IsA(AsmType::Intish())) {
FAILn("operator ~ expects intish");
}
- current_function_builder_->EmitI32Const(0xffffffff);
+ current_function_builder_->EmitI32Const(0xFFFFFFFF);
current_function_builder_->Emit(kExprI32Xor);
ret = AsmType::Signed();
}
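
The '~' operator is lowered to an xor with all-ones, which matches two's-complement bitwise negation. A one-line check (not parser code):

    #include <cstdint>

    static_assert((~int32_t{5}) == (int32_t{5} ^ -1),
                  "~x equals x XOR all-ones for int32_t");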
@@ -2066,8 +2064,8 @@ AsmType* AsmJsParser::ParenthesizedExpression() {
AsmType* AsmJsParser::ValidateCall() {
AsmType* return_type = call_coercion_;
call_coercion_ = nullptr;
- int call_pos = static_cast<int>(scanner_.Position());
- int to_number_pos = static_cast<int>(call_coercion_position_);
+ size_t call_pos = scanner_.Position();
+ size_t to_number_pos = call_coercion_position_;
bool allow_peek = (call_coercion_deferred_position_ == scanner_.Position());
AsmJsScanner::token_t function_name = Consume();
@@ -2113,7 +2111,7 @@ AsmType* AsmJsParser::ValidateCall() {
tmp.emplace(this);
current_function_builder_->EmitSetLocal(tmp->get());
// The position of function table calls is after the table lookup.
- call_pos = static_cast<int>(scanner_.Position());
+ call_pos = scanner_.Position();
} else {
VarInfo* function_info = GetVarInfo(function_name);
if (function_info->kind == VarKind::kUnused) {
@@ -2176,7 +2174,7 @@ AsmType* AsmJsParser::ValidateCall() {
(return_type == nullptr || return_type->IsA(AsmType::Float()))) {
DCHECK_NULL(call_coercion_deferred_);
call_coercion_deferred_ = AsmType::Signed();
- to_number_pos = static_cast<int>(scanner_.Position());
+ to_number_pos = scanner_.Position();
return_type = AsmType::Signed();
} else if (return_type == nullptr) {
to_number_pos = call_pos; // No conversion.
@@ -2395,9 +2393,9 @@ void AsmJsParser::ValidateHeapAccess() {
// TODO(bradnelson): Check more things.
// TODO(mstarzinger): Clarify and explain where this limit is coming from,
// as it is not mandated by the spec directly.
- if (offset > 0x7fffffff ||
+ if (offset > 0x7FFFFFFF ||
static_cast<uint64_t>(offset) * static_cast<uint64_t>(size) >
- 0x7fffffff) {
+ 0x7FFFFFFF) {
FAIL("Heap access out of range");
}
if (Check(']')) {
diff --git a/deps/v8/src/asmjs/asm-scanner.cc b/deps/v8/src/asmjs/asm-scanner.cc
index 910fe37546..af41208ead 100644
--- a/deps/v8/src/asmjs/asm-scanner.cc
+++ b/deps/v8/src/asmjs/asm-scanner.cc
@@ -15,7 +15,7 @@ namespace internal {
namespace {
// Cap number of identifiers to ensure we can assign both global and
// local ones a token id in the range of an int32_t.
-static const int kMaxIdentifierCount = 0xf000000;
+static const int kMaxIdentifierCount = 0xF000000;
};
AsmJsScanner::AsmJsScanner(Utf16CharacterStream* stream)
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 90d7ac3ff8..1b83735bc9 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -131,14 +131,14 @@ static struct V8_ALIGNED(16) {
static struct V8_ALIGNED(16) {
uint64_t a;
uint64_t b;
-} double_absolute_constant = {V8_UINT64_C(0x7FFFFFFFFFFFFFFF),
- V8_UINT64_C(0x7FFFFFFFFFFFFFFF)};
+} double_absolute_constant = {uint64_t{0x7FFFFFFFFFFFFFFF},
+ uint64_t{0x7FFFFFFFFFFFFFFF}};
static struct V8_ALIGNED(16) {
uint64_t a;
uint64_t b;
-} double_negate_constant = {V8_UINT64_C(0x8000000000000000),
- V8_UINT64_C(0x8000000000000000)};
+} double_negate_constant = {uint64_t{0x8000000000000000},
+ uint64_t{0x8000000000000000}};
const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
@@ -351,7 +351,7 @@ void RelocInfo::set_target_address(Isolate* isolate, Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
- Assembler::set_target_address_at(isolate, pc_, host_, target,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
IsCodeTarget(rmode_)) {
@@ -801,6 +801,16 @@ ExternalReference ExternalReference::builtins_address(Isolate* isolate) {
return ExternalReference(isolate->builtins()->builtins_table_address());
}
+ExternalReference ExternalReference::handle_scope_implementer_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->handle_scope_implementer_address());
+}
+
+ExternalReference ExternalReference::pending_microtask_count_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->pending_microtask_count_address());
+}
+
ExternalReference ExternalReference::interpreter_dispatch_table_address(
Isolate* isolate) {
return ExternalReference(isolate->interpreter()->dispatch_table_address());
@@ -1002,6 +1012,16 @@ ExternalReference ExternalReference::wasm_word64_popcnt(Isolate* isolate) {
Redirect(isolate, FUNCTION_ADDR(wasm::word64_popcnt_wrapper)));
}
+ExternalReference ExternalReference::wasm_word32_rol(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::word32_rol_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_word32_ror(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::word32_ror_wrapper)));
+}
+
static void f64_acos_wrapper(double* param) {
WriteDoubleValue(param, base::ieee754::acos(ReadDoubleValue(param)));
}
@@ -1514,6 +1534,12 @@ ExternalReference ExternalReference::runtime_function_table_address(
const_cast<Runtime::Function*>(Runtime::RuntimeFunctionTable(isolate)));
}
+ExternalReference ExternalReference::invalidate_prototype_chains_function(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(JSObject::InvalidatePrototypeChains)));
+}
+
double power_helper(Isolate* isolate, double x, double y) {
int y_int = static_cast<int>(y);
if (y == y_int) {
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 1e8365dcee..0cebdbc2d7 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -36,6 +36,7 @@
#define V8_ASSEMBLER_H_
#include <forward_list>
+#include <iosfwd>
#include "src/allocation.h"
#include "src/builtins/builtins.h"
@@ -54,9 +55,6 @@ namespace v8 {
class ApiFunction;
namespace internal {
-namespace wasm {
-class WasmCode;
-}
// Forward declarations.
class Isolate;
@@ -486,6 +484,7 @@ class RelocInfo {
Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
Code* host() const { return host_; }
+ Address constant_pool() const { return constant_pool_; }
// Apply a relocation by delta bytes. When the code object is moved, PC
// relative addresses have to be updated as well as absolute addresses
@@ -625,9 +624,6 @@ class RelocInfo {
byte* pc_;
Mode rmode_;
intptr_t data_;
- // TODO(mtrofin): try remove host_, if all we need is the constant_pool_ or
- // other few attributes, like start address, etc. This is so that we can reuse
- // RelocInfo for WasmCode without having a modal design.
Code* host_;
Address constant_pool_ = nullptr;
friend class RelocIterator;
@@ -830,6 +826,9 @@ class ExternalReference BASE_EMBEDDED {
// The builtins table as an external reference, used by lazy deserialization.
static ExternalReference builtins_address(Isolate* isolate);
+ static ExternalReference handle_scope_implementer_address(Isolate* isolate);
+ static ExternalReference pending_microtask_count_address(Isolate* isolate);
+
// One-of-a-kind references. These references are not part of a general
// pattern. This means that they have to be added to the
// ExternalReferenceTable in serialize.cc manually.
@@ -875,6 +874,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference wasm_word64_ctz(Isolate* isolate);
static ExternalReference wasm_word32_popcnt(Isolate* isolate);
static ExternalReference wasm_word64_popcnt(Isolate* isolate);
+ static ExternalReference wasm_word32_rol(Isolate* isolate);
+ static ExternalReference wasm_word32_ror(Isolate* isolate);
static ExternalReference wasm_float64_pow(Isolate* isolate);
static ExternalReference wasm_set_thread_in_wasm_flag(Isolate* isolate);
static ExternalReference wasm_clear_thread_in_wasm_flag(Isolate* isolate);
@@ -1019,6 +1020,9 @@ class ExternalReference BASE_EMBEDDED {
V8_EXPORT_PRIVATE static ExternalReference runtime_function_table_address(
Isolate* isolate);
+ static ExternalReference invalidate_prototype_chains_function(
+ Isolate* isolate);
+
Address address() const { return reinterpret_cast<Address>(address_); }
// Used to read out the last step action of the debugger.
@@ -1328,16 +1332,24 @@ class RegisterBase {
int bit() const { return 1 << code(); }
- inline bool operator==(SubType other) const {
+ inline constexpr bool operator==(SubType other) const {
return reg_code_ == other.reg_code_;
}
- inline bool operator!=(SubType other) const { return !(*this == other); }
+ inline constexpr bool operator!=(SubType other) const {
+ return reg_code_ != other.reg_code_;
+ }
protected:
explicit constexpr RegisterBase(int code) : reg_code_(code) {}
int reg_code_;
};
+template <typename SubType, int kAfterLastRegister>
+inline std::ostream& operator<<(std::ostream& os,
+ RegisterBase<SubType, kAfterLastRegister> reg) {
+ return reg.is_valid() ? os << "r" << reg.code() : os << "<invalid reg>";
+}
+
} // namespace internal
} // namespace v8
#endif // V8_ASSEMBLER_H_
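
A minimal sketch (illustrative type, not V8's RegisterBase) of the streaming operator added above: valid registers print as "r<code>", invalid ones as a placeholder.

    #include <iostream>

    struct Reg {
      int code;
      bool is_valid() const { return code >= 0; }
    };

    std::ostream& operator<<(std::ostream& os, Reg reg) {
      return reg.is_valid() ? os << "r" << reg.code : os << "<invalid reg>";
    }

    int main() {
      std::cout << Reg{3} << " " << Reg{-1} << "\n";  // prints: r3 <invalid reg>
      return 0;
    }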
diff --git a/deps/v8/src/ast/ast-numbering.cc b/deps/v8/src/ast/ast-numbering.cc
index 0736e543e2..ade1a85349 100644
--- a/deps/v8/src/ast/ast-numbering.cc
+++ b/deps/v8/src/ast/ast-numbering.cc
@@ -16,10 +16,7 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
public:
AstNumberingVisitor(uintptr_t stack_limit, Zone* zone,
Compiler::EagerInnerFunctionLiterals* eager_literals)
- : zone_(zone),
- eager_literals_(eager_literals),
- suspend_count_(0),
- dont_optimize_reason_(kNoReason) {
+ : zone_(zone), eager_literals_(eager_literals), suspend_count_(0) {
InitializeAstVisitor(stack_limit);
}
@@ -39,19 +36,12 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
void VisitArguments(ZoneList<Expression*>* arguments);
void VisitLiteralProperty(LiteralProperty* property);
- void DisableOptimization(BailoutReason reason) {
- dont_optimize_reason_ = reason;
- }
-
- BailoutReason dont_optimize_reason() const { return dont_optimize_reason_; }
-
Zone* zone() const { return zone_; }
Zone* zone_;
Compiler::EagerInnerFunctionLiterals* eager_literals_;
int suspend_count_;
FunctionKind function_kind_;
- BailoutReason dont_optimize_reason_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstNumberingVisitor);
@@ -80,7 +70,6 @@ void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
void AstNumberingVisitor::VisitNativeFunctionLiteral(
NativeFunctionLiteral* node) {
- DisableOptimization(kNativeFunctionLiteral);
}
void AstNumberingVisitor::VisitDoExpression(DoExpression* node) {
@@ -206,6 +195,11 @@ void AstNumberingVisitor::VisitProperty(Property* node) {
Visit(node->obj());
}
+void AstNumberingVisitor::VisitResolvedProperty(ResolvedProperty* node) {
+ Visit(node->object());
+ Visit(node->property());
+}
+
void AstNumberingVisitor::VisitAssignment(Assignment* node) {
Visit(node->target());
Visit(node->value());
@@ -262,6 +256,7 @@ void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
Visit(node->assign_iterator()); // Not part of loop.
+ Visit(node->assign_next());
node->set_first_suspend_id(suspend_count_);
Visit(node->next_result());
Visit(node->result_done());
@@ -326,11 +321,6 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
for (int i = 0; i < node->properties()->length(); i++) {
VisitLiteralProperty(node->properties()->at(i));
}
- node->InitDepthAndFlags();
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code will be is emitted.
- node->CalculateEmitStore(zone_);
}
void AstNumberingVisitor::VisitLiteralProperty(LiteralProperty* node) {
@@ -342,7 +332,6 @@ void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
for (int i = 0; i < node->values()->length(); i++) {
Visit(node->values()->at(i));
}
- node->InitDepthAndFlags();
}
void AstNumberingVisitor::VisitCall(Call* node) {
@@ -402,7 +391,6 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
VisitDeclarations(scope->declarations());
VisitStatements(node->body());
- node->set_dont_optimize_reason(dont_optimize_reason());
node->set_suspend_count(suspend_count_);
return !HasStackOverflow();
diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h
index 6ad4df357c..3679ec762a 100644
--- a/deps/v8/src/ast/ast-traversal-visitor.h
+++ b/deps/v8/src/ast/ast-traversal-visitor.h
@@ -243,6 +243,7 @@ void AstTraversalVisitor<Subclass>::VisitForStatement(ForStatement* stmt) {
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitForInStatement(ForInStatement* stmt) {
PROCESS_NODE(stmt);
+ RECURSE(Visit(stmt->each()));
RECURSE(Visit(stmt->enumerable()));
RECURSE(Visit(stmt->body()));
}
@@ -392,6 +393,14 @@ void AstTraversalVisitor<Subclass>::VisitProperty(Property* expr) {
}
template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitResolvedProperty(
+ ResolvedProperty* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(VisitVariableProxy(expr->object()));
+ RECURSE_EXPRESSION(VisitVariableProxy(expr->property()));
+}
+
+template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitCall(Call* expr) {
PROCESS_EXPRESSION(expr);
RECURSE_EXPRESSION(Visit(expr->expression()));
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index 710cbb40a5..da14d87475 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -514,18 +514,17 @@ bool ArrayLiteral::is_empty() const {
}
int ArrayLiteral::InitDepthAndFlags() {
- DCHECK_LT(first_spread_index_, 0);
if (is_initialized()) return depth();
- int constants_length = values()->length();
+ int constants_length =
+ first_spread_index_ >= 0 ? first_spread_index_ : values()->length();
// Fill in the literals.
- bool is_simple = true;
+ bool is_simple = first_spread_index_ < 0;
int depth_acc = 1;
int array_index = 0;
for (; array_index < constants_length; array_index++) {
Expression* element = values()->at(array_index);
- DCHECK(!element->IsSpread());
MaterializedLiteral* literal = element->AsMaterializedLiteral();
if (literal != nullptr) {
int subliteral_depth = literal->InitDepthAndFlags() + 1;
@@ -546,11 +545,10 @@ int ArrayLiteral::InitDepthAndFlags() {
}
void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
- DCHECK_LT(first_spread_index_, 0);
-
if (!constant_elements_.is_null()) return;
- int constants_length = values()->length();
+ int constants_length =
+ first_spread_index_ >= 0 ? first_spread_index_ : values()->length();
ElementsKind kind = FIRST_FAST_ELEMENTS_KIND;
Handle<FixedArray> fixed_array =
isolate->factory()->NewFixedArrayWithHoles(constants_length);
@@ -614,11 +612,6 @@ bool ArrayLiteral::IsFastCloningSupported() const {
ConstructorBuiltins::kMaximumClonedShallowArrayElements;
}
-void ArrayLiteral::RewindSpreads() {
- values_->Rewind(first_spread_index_);
- first_spread_index_ = -1;
-}
-
bool MaterializedLiteral::IsSimple() const {
if (IsArrayLiteral()) return AsArrayLiteral()->is_simple();
if (IsObjectLiteral()) return AsObjectLiteral()->is_simple();
@@ -812,6 +805,10 @@ Call::CallType Call::GetCallType() const {
}
}
+ if (expression()->IsResolvedProperty()) {
+ return RESOLVED_PROPERTY_CALL;
+ }
+
return OTHER_CALL;
}
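
The ArrayLiteral changes above stop rejecting spreads outright: constant folding now only considers the element prefix before the first spread, and a literal containing a spread is never "simple". A small sketch of that prefix rule, with invented names rather than V8's API:

    #include <cassert>
    #include <cstddef>

    // Sketch of the prefix logic: with no spread (first_spread_index < 0)
    // every value is a constant candidate and the literal can be simple;
    // with a spread, only the elements before it are folded.
    struct ConstantPrefix {
      size_t length;
      bool is_simple;
    };

    ConstantPrefix ComputeConstantPrefix(size_t num_values,
                                         int first_spread_index) {
      ConstantPrefix p;
      p.is_simple = first_spread_index < 0;
      p.length = first_spread_index >= 0
                     ? static_cast<size_t>(first_spread_index)
                     : num_values;
      return p;
    }

    int main() {
      // [1, 2, 3]        -> 3 constants, simple
      // [1, 2, ...xs, 4] -> 2 constants, not simple
      assert(ComputeConstantPrefix(3, -1).is_simple);
      assert(ComputeConstantPrefix(3, -1).length == 3);
      assert(!ComputeConstantPrefix(4, 2).is_simple);
      assert(ComputeConstantPrefix(4, 2).length == 2);
    }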
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 1ca192a462..f608621d3b 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -94,6 +94,7 @@ namespace internal {
V(Literal) \
V(NativeFunctionLiteral) \
V(Property) \
+ V(ResolvedProperty) \
V(RewritableExpression) \
V(Spread) \
V(SuperCallReference) \
@@ -590,11 +591,13 @@ class ForInStatement final : public ForEachStatement {
class ForOfStatement final : public ForEachStatement {
public:
void Initialize(Statement* body, Variable* iterator,
- Expression* assign_iterator, Expression* next_result,
- Expression* result_done, Expression* assign_each) {
+ Expression* assign_iterator, Expression* assign_next,
+ Expression* next_result, Expression* result_done,
+ Expression* assign_each) {
ForEachStatement::Initialize(body);
iterator_ = iterator;
assign_iterator_ = assign_iterator;
+ assign_next_ = assign_next;
next_result_ = next_result;
result_done_ = result_done;
assign_each_ = assign_each;
@@ -609,6 +612,9 @@ class ForOfStatement final : public ForEachStatement {
return assign_iterator_;
}
+ // iteratorRecord.next = iterator.next
+ Expression* assign_next() const { return assign_next_; }
+
// result = iterator.next() // with type check
Expression* next_result() const {
return next_result_;
@@ -624,6 +630,12 @@ class ForOfStatement final : public ForEachStatement {
return assign_each_;
}
+ void set_assign_iterator(Expression* e) { assign_iterator_ = e; }
+ void set_assign_next(Expression* e) { assign_next_ = e; }
+ void set_next_result(Expression* e) { next_result_ = e; }
+ void set_result_done(Expression* e) { result_done_ = e; }
+ void set_assign_each(Expression* e) { assign_each_ = e; }
+
private:
friend class AstNodeFactory;
@@ -637,6 +649,7 @@ class ForOfStatement final : public ForEachStatement {
Variable* iterator_;
Expression* assign_iterator_;
+ Expression* assign_next_;
Expression* next_result_;
Expression* result_done_;
Expression* assign_each_;
@@ -1450,22 +1463,23 @@ class ArrayLiteral final : public AggregateLiteral {
}
// Provide a mechanism for iterating through values to rewrite spreads.
- ZoneList<Expression*>::iterator FirstSpread() const {
+ ZoneList<Expression*>::iterator FirstSpreadOrEndValue() const {
return (first_spread_index_ >= 0) ? values_->begin() + first_spread_index_
: values_->end();
}
+ ZoneList<Expression*>::iterator BeginValue() const {
+ return values_->begin();
+ }
ZoneList<Expression*>::iterator EndValue() const { return values_->end(); }
- // Rewind an array literal omitting everything from the first spread on.
- void RewindSpreads();
-
private:
friend class AstNodeFactory;
ArrayLiteral(ZoneList<Expression*>* values, int first_spread_index, int pos)
: AggregateLiteral(pos, kArrayLiteral),
first_spread_index_(first_spread_index),
- values_(values) {}
+ values_(values) {
+ }
int first_spread_index_;
Handle<ConstantElementsPair> constant_elements_;
@@ -1606,6 +1620,25 @@ class Property final : public Expression {
Expression* key_;
};
+// ResolvedProperty pairs a receiver field with a value field. It allows Call
+// to support arbitrary receivers while still taking advantage of TypeFeedback.
+class ResolvedProperty final : public Expression {
+ public:
+ VariableProxy* object() const { return object_; }
+ VariableProxy* property() const { return property_; }
+
+ void set_object(VariableProxy* e) { object_ = e; }
+ void set_property(VariableProxy* e) { property_ = e; }
+
+ private:
+ friend class AstNodeFactory;
+
+ ResolvedProperty(VariableProxy* obj, VariableProxy* property, int pos)
+ : Expression(pos, kResolvedProperty), object_(obj), property_(property) {}
+
+ VariableProxy* object_;
+ VariableProxy* property_;
+};
class Call final : public Expression {
public:
@@ -1632,6 +1665,7 @@ class Call final : public Expression {
NAMED_SUPER_PROPERTY_CALL,
KEYED_SUPER_PROPERTY_CALL,
SUPER_CALL,
+ RESOLVED_PROPERTY_CALL,
OTHER_CALL
};
@@ -1697,11 +1731,10 @@ class CallNew final : public Expression {
ZoneList<Expression*>* arguments_;
};
-
// The CallRuntime class does not represent any official JavaScript
// language construct. Instead it is used to call a C or JS function
// with a set of arguments. This is used from the builtins that are
-// implemented in JavaScript (see "v8natives.js").
+// implemented in JavaScript.
class CallRuntime final : public Expression {
public:
ZoneList<Expression*>* arguments() const { return arguments_; }
@@ -2104,7 +2137,6 @@ class YieldStar final : public Suspend {
// - One for awaiting the iterator result yielded by the delegated iterator
// (await_delegated_iterator_output_suspend_id)
int await_iterator_close_suspend_id() const {
- DCHECK_NE(-1, await_iterator_close_suspend_id_);
return await_iterator_close_suspend_id_;
}
void set_await_iterator_close_suspend_id(int id) {
@@ -2112,7 +2144,6 @@ class YieldStar final : public Suspend {
}
int await_delegated_iterator_output_suspend_id() const {
- DCHECK_NE(-1, await_delegated_iterator_output_suspend_id_);
return await_delegated_iterator_output_suspend_id_;
}
void set_await_delegated_iterator_output_suspend_id(int id) {
@@ -2168,7 +2199,8 @@ class FunctionLiteral final : public Expression {
kAnonymousExpression,
kNamedExpression,
kDeclaration,
- kAccessorOrMethod
+ kAccessorOrMethod,
+ kWrapped,
};
enum IdType { kIdTypeInvalid = -1, kIdTypeTopLevel = 0 };
@@ -2199,6 +2231,7 @@ class FunctionLiteral final : public Expression {
bool is_anonymous_expression() const {
return function_type() == kAnonymousExpression;
}
+ bool is_wrapped() const { return function_type() == kWrapped; }
LanguageMode language_mode() const;
static bool NeedsHomeObject(Expression* expr);
@@ -2274,7 +2307,9 @@ class FunctionLiteral final : public Expression {
}
FunctionKind kind() const;
- bool dont_optimize() { return dont_optimize_reason() != kNoReason; }
+ bool dont_optimize() {
+ return dont_optimize_reason() != BailoutReason::kNoReason;
+ }
BailoutReason dont_optimize_reason() {
return DontOptimizeReasonField::decode(bit_field_);
}
@@ -2337,14 +2372,14 @@ class FunctionLiteral final : public Expression {
Pretenure::encode(false) |
HasDuplicateParameters::encode(has_duplicate_parameters ==
kHasDuplicateParameters) |
- DontOptimizeReasonField::encode(kNoReason) |
+ DontOptimizeReasonField::encode(BailoutReason::kNoReason) |
RequiresInstanceFieldsInitializer::encode(false);
if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile();
DCHECK_EQ(body == nullptr, expected_property_count < 0);
}
class FunctionTypeBits
- : public BitField<FunctionType, Expression::kNextBitFieldIndex, 2> {};
+ : public BitField<FunctionType, Expression::kNextBitFieldIndex, 3> {};
class Pretenure : public BitField<bool, FunctionTypeBits::kNext, 1> {};
class HasDuplicateParameters : public BitField<bool, Pretenure::kNext, 1> {};
class DontOptimizeReasonField
@@ -2993,6 +3028,12 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) Property(obj, key, pos);
}
+ ResolvedProperty* NewResolvedProperty(VariableProxy* obj,
+ VariableProxy* property,
+ int pos = kNoSourcePosition) {
+ return new (zone_) ResolvedProperty(obj, property, pos);
+ }
+
Call* NewCall(Expression* expression, ZoneList<Expression*>* arguments,
int pos, Call::PossiblyEval possibly_eval = Call::NOT_EVAL) {
return new (zone_) Call(expression, arguments, pos, possibly_eval);
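
A side note on the FunctionTypeBits change in the ast.h hunks above: adding kWrapped makes FunctionType a five-value enum, so its bit field grows from 2 to 3 bits. A self-contained sketch with a minimal BitField stand-in (the real v8::base::BitField has more machinery) makes the arithmetic explicit:

    #include <cstdint>

    // Minimal stand-in for the BitField helper (sketch only): packs a value
    // of type T into |size| bits starting at |shift| of a 32-bit word.
    template <class T, int shift, int size>
    struct BitField {
      static constexpr uint32_t kMask = ((uint32_t{1} << size) - 1) << shift;
      static constexpr int kNext = shift + size;
      static constexpr T kMax = static_cast<T>((uint32_t{1} << size) - 1);
      static constexpr uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static constexpr T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> shift);
      }
    };

    enum FunctionType {
      kAnonymousExpression,
      kNamedExpression,
      kDeclaration,
      kAccessorOrMethod,
      kWrapped,  // fifth value: no longer representable in 2 bits
    };

    using TwoBits = BitField<FunctionType, 0, 2>;
    using ThreeBits = BitField<FunctionType, 0, 3>;

    static_assert(kWrapped > TwoBits::kMax, "2 bits hold only 4 values");
    static_assert(kWrapped <= ThreeBits::kMax, "3 bits are enough for 5 values");
    static_assert(ThreeBits::decode(ThreeBits::encode(kWrapped)) == kWrapped,
                  "round trip");

    int main() { return 0; }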
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index f01ade8896..374c848289 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -26,6 +26,7 @@ CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js)
is_iterator_error_ = false;
is_async_iterator_error_ = false;
is_user_js_ = is_user_js;
+ function_kind_ = kNormalFunction;
InitializeAstVisitor(isolate);
}
@@ -187,7 +188,10 @@ void CallPrinter::VisitDebuggerStatement(DebuggerStatement* node) {}
void CallPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
+ FunctionKind last_function_kind = function_kind_;
+ function_kind_ = node->kind();
FindStatements(node->body());
+ function_kind_ = last_function_kind;
}
@@ -250,7 +254,17 @@ void CallPrinter::VisitArrayLiteral(ArrayLiteral* node) {
Print("[");
for (int i = 0; i < node->values()->length(); i++) {
if (i != 0) Print(",");
- Find(node->values()->at(i), true);
+ Expression* subexpr = node->values()->at(i);
+ Spread* spread = subexpr->AsSpread();
+ if (spread != nullptr && !found_ &&
+ position_ == spread->expression()->position()) {
+ found_ = true;
+ is_iterator_error_ = true;
+ Find(spread->expression(), true);
+ done_ = true;
+ return;
+ }
+ Find(subexpr, true);
}
Print("]");
}
@@ -277,7 +291,17 @@ void CallPrinter::VisitCompoundAssignment(CompoundAssignment* node) {
void CallPrinter::VisitYield(Yield* node) { Find(node->expression()); }
-void CallPrinter::VisitYieldStar(YieldStar* node) { Find(node->expression()); }
+void CallPrinter::VisitYieldStar(YieldStar* node) {
+ if (!found_ && position_ == node->expression()->position()) {
+ found_ = true;
+ if (IsAsyncFunction(function_kind_))
+ is_async_iterator_error_ = true;
+ else
+ is_iterator_error_ = true;
+ Print("yield* ");
+ }
+ Find(node->expression());
+}
void CallPrinter::VisitAwait(Await* node) { Find(node->expression()); }
@@ -302,6 +326,7 @@ void CallPrinter::VisitProperty(Property* node) {
}
}
+void CallPrinter::VisitResolvedProperty(ResolvedProperty* node) {}
void CallPrinter::VisitCall(Call* node) {
bool was_found = false;
@@ -960,8 +985,10 @@ void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
UNREACHABLE();
}
Print(" %s\n", prediction);
- PrintLiteralWithModeIndented("CATCHVAR", node->scope()->catch_variable(),
- node->scope()->catch_variable()->raw_name());
+ if (node->scope()) {
+ PrintLiteralWithModeIndented("CATCHVAR", node->scope()->catch_variable(),
+ node->scope()->catch_variable()->raw_name());
+ }
PrintIndentedVisit("CATCH", node->catch_block());
}
@@ -1223,6 +1250,14 @@ void AstPrinter::VisitProperty(Property* node) {
}
}
+void AstPrinter::VisitResolvedProperty(ResolvedProperty* node) {
+ EmbeddedVector<char, 128> buf;
+ SNPrintF(buf, "RESOLVED-PROPERTY");
+ IndentedScope indent(this, buf.start(), node->position());
+
+ PrintIndentedVisit("RECEIVER", node->object());
+ PrintIndentedVisit("PROPERTY", node->property());
+}
void AstPrinter::VisitCall(Call* node) {
EmbeddedVector<char, 128> buf;
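
For context on the CallPrinter hunks above: the visitor now threads the enclosing function's kind through recursion so VisitYieldStar can distinguish an async-iterator error from a plain iterator error. A rough sketch of the save/set/restore pattern, using invented names rather than V8's visitor classes:

    #include <iostream>
    #include <string>

    // Sketch: a recursive visitor keeps the kind of the innermost enclosing
    // function in a member so nested nodes (here, a fake "yield*") can report
    // context-sensitive errors. All names below are illustrative.
    struct Visitor {
      std::string function_kind = "normal";

      void VisitFunction(const std::string& kind, bool contains_yield_star) {
        std::string last_kind = function_kind;  // save the outer kind
        function_kind = kind;                   // the kind now in scope
        if (contains_yield_star) VisitYieldStar();
        function_kind = last_kind;              // restore on the way out
      }

      void VisitYieldStar() {
        if (function_kind == "async") {
          std::cout << "async iterator error\n";
        } else {
          std::cout << "iterator error\n";
        }
      }
    };

    int main() {
      Visitor v;
      v.VisitFunction("async", /*contains_yield_star=*/true);   // async case
      v.VisitFunction("normal", /*contains_yield_star=*/true);  // sync case
    }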
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index 97c2437877..d93137b7cf 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -50,6 +50,7 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
bool is_iterator_error_;
bool is_async_iterator_error_;
bool is_call_error_;
+ FunctionKind function_kind_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
protected:
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index d012ec90f1..8f2f85080c 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -147,8 +147,6 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type)
DCHECK_NE(SCRIPT_SCOPE, scope_type);
SetDefaults();
set_language_mode(outer_scope->language_mode());
- force_context_allocation_ =
- !is_function_scope() && outer_scope->has_forced_context_allocation();
outer_scope_->AddInnerScope(this);
}
@@ -649,8 +647,8 @@ void DeclarationScope::Analyze(ParseInfo* info) {
RuntimeCallTimerScope runtimeTimer(
info->runtime_call_stats(),
info->on_background_thread()
- ? &RuntimeCallStats::CompileBackgroundScopeAnalysis
- : &RuntimeCallStats::CompileScopeAnalysis);
+ ? RuntimeCallCounterId::kCompileBackgroundScopeAnalysis
+ : RuntimeCallCounterId::kCompileScopeAnalysis);
DCHECK_NOT_NULL(info->literal());
DeclarationScope* scope = info->literal()->scope();
@@ -1370,12 +1368,8 @@ bool Scope::AllowsLazyParsingWithoutUnresolvedVariables(
if (s->is_catch_scope()) continue;
// With scopes do not introduce variables that need allocation.
if (s->is_with_scope()) continue;
- // Module scopes context-allocate all variables, and have no
- // {this} or {arguments} variables whose existence depends on
- // references to them.
- if (s->is_module_scope()) continue;
- // Only block scopes and function scopes should disallow preparsing.
- DCHECK(s->is_block_scope() || s->is_function_scope());
+ DCHECK(s->is_module_scope() || s->is_block_scope() ||
+ s->is_function_scope());
return false;
}
return true;
@@ -1443,6 +1437,10 @@ bool Scope::NeedsScopeInfo() const {
return NeedsContext();
}
+bool Scope::ShouldBanArguments() {
+ return GetReceiverScope()->should_ban_arguments();
+}
+
DeclarationScope* Scope::GetReceiverScope() {
Scope* scope = this;
while (!scope->is_script_scope() &&
@@ -1734,9 +1732,6 @@ void Scope::Print(int n) {
if (scope->was_lazily_parsed()) Indent(n1, "// lazily parsed\n");
if (scope->ShouldEagerCompile()) Indent(n1, "// will be compiled\n");
}
- if (has_forced_context_allocation()) {
- Indent(n1, "// forces context allocation\n");
- }
if (num_stack_slots_ > 0) {
Indent(n1, "// ");
PrintF("%d stack slots\n", num_stack_slots_);
@@ -2111,11 +2106,8 @@ bool Scope::MustAllocateInContext(Variable* var) {
// an eval() call or a runtime with lookup), it must be allocated in the
// context.
//
- // Exceptions: If the scope as a whole has forced context allocation, all
- // variables will have context allocation, even temporaries. Otherwise
- // temporary variables are always stack-allocated. Catch-bound variables are
+ // Temporary variables are always stack-allocated. Catch-bound variables are
// always context-allocated.
- if (has_forced_context_allocation()) return true;
if (var->mode() == TEMPORARY) return false;
if (is_catch_scope()) return true;
if ((is_script_scope() || is_eval_scope()) &&
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index bcfd2187df..d2e8886319 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -334,14 +334,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
bool is_hidden() const { return is_hidden_; }
void set_is_hidden() { is_hidden_ = true; }
- // In some cases we want to force context allocation for a whole scope.
- void ForceContextAllocation() {
- DCHECK(!already_resolved_);
- force_context_allocation_ = true;
- }
- bool has_forced_context_allocation() const {
- return force_context_allocation_;
- }
void ForceContextAllocationForParameters() {
DCHECK(!already_resolved_);
force_context_allocation_for_parameters_ = true;
@@ -404,6 +396,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
return static_cast<Variable*>(variables_.Start()->value);
}
+ bool ShouldBanArguments();
+
// ---------------------------------------------------------------------------
// Variable allocation.
@@ -704,6 +698,10 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
bool asm_module() const { return asm_module_; }
void set_asm_module();
+ bool should_ban_arguments() const {
+ return IsClassFieldsInitializerFunction(function_kind());
+ }
+
void DeclareThis(AstValueFactory* ast_value_factory);
void DeclareArguments(AstValueFactory* ast_value_factory);
void DeclareDefaultFunctionVariables(AstValueFactory* ast_value_factory);
diff --git a/deps/v8/src/bailout-reason.cc b/deps/v8/src/bailout-reason.cc
index ac7bb929b9..7cf983861c 100644
--- a/deps/v8/src/bailout-reason.cc
+++ b/deps/v8/src/bailout-reason.cc
@@ -8,13 +8,24 @@
namespace v8 {
namespace internal {
-const char* GetBailoutReason(BailoutReason reason) {
- DCHECK_LT(reason, kLastErrorMessage);
#define ERROR_MESSAGES_TEXTS(C, T) T,
+
+const char* GetBailoutReason(BailoutReason reason) {
+ DCHECK_LT(reason, BailoutReason::kLastErrorMessage);
+ DCHECK_GE(reason, BailoutReason::kNoReason);
static const char* error_messages_[] = {
- ERROR_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)};
-#undef ERROR_MESSAGES_TEXTS
- return error_messages_[reason];
+ BAILOUT_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)};
+ return error_messages_[static_cast<int>(reason)];
}
+
+const char* GetAbortReason(AbortReason reason) {
+ DCHECK_LT(reason, AbortReason::kLastErrorMessage);
+ DCHECK_GE(reason, AbortReason::kNoReason);
+ static const char* error_messages_[] = {
+ ABORT_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)};
+ return error_messages_[static_cast<int>(reason)];
+}
+
+#undef ERROR_MESSAGES_TEXTS
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index 2bb92e1a2b..c8e81c69d4 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -8,158 +8,117 @@
namespace v8 {
namespace internal {
-// TODO(svenpanne) introduce an AbortReason and partition this list
-#define ERROR_MESSAGES_LIST(V) \
+#define ABORT_MESSAGES_LIST(V) \
V(kNoReason, "no reason") \
\
V(k32BitValueInRegisterIsNotZeroExtended, \
"32 bit value in register is not zero-extended") \
+ V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
V(kAllocatingNonEmptyPackedArray, "Allocating non-empty packed array") \
V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
- V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
- V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change") \
- V(kClassConstructorFunction, "Class constructor function") \
- V(kClassLiteral, "Class literal") \
- V(kCodeGenerationFailed, "Code generation failed") \
V(kCodeObjectNotProperlyPatched, "Code object not properly patched") \
- V(kComputedPropertyName, "Computed property name") \
- V(kContextAllocatedArguments, "Context-allocated arguments") \
- V(kDebuggerStatement, "DebuggerStatement") \
- V(kDeclarationInCatchContext, "Declaration in catch context") \
- V(kDeclarationInWithContext, "Declaration in with context") \
- V(kDynamicImport, "Dynamic module import") \
- V(kCyclicObjectStateDetectedInEscapeAnalysis, \
- "Cyclic object state detected by escape analysis") \
- V(kEval, "eval") \
V(kExpectedAllocationSite, "Expected allocation site") \
- V(kExpectedBooleanValue, "Expected boolean value") \
V(kExpectedFeedbackVector, "Expected feedback vector") \
- V(kExpectedHeapNumber, "Expected HeapNumber") \
- V(kExpectedNonIdenticalObjects, "Expected non-identical objects") \
V(kExpectedOptimizationSentinel, \
"Expected optimized code cell or optimization sentinel") \
- V(kExpectedNewSpaceObject, "Expected new space object") \
V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
- V(kForOfStatement, "ForOfStatement") \
- V(kFunctionBeingDebugged, "Function is being debugged") \
- V(kFunctionCallsEval, "Function calls eval") \
V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
"The function_data field should be a BytecodeArray on interpreter entry") \
- V(kGenerator, "Generator") \
- V(kGetIterator, "GetIterator") \
- V(kGraphBuildingFailed, "Optimized graph construction failed") \
- V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered") \
- V(kIndexIsNegative, "Index is negative") \
- V(kIndexIsTooLarge, "Index is too large") \
- V(kInputGPRIsExpectedToHaveUpper32Cleared, \
- "Input GPR is expected to have upper32 cleared") \
V(kInputStringTooLong, "Input string too long") \
V(kInvalidBytecode, "Invalid bytecode") \
V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
"Invalid ElementsKind for InternalArray or InternalPackedArray") \
- V(kInvalidFullCodegenState, "invalid full-codegen state") \
V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
V(kInvalidJumpTableIndex, "Invalid jump table index") \
V(kInvalidRegisterFileInGenerator, "invalid register file in generator") \
- V(kLiveEdit, "LiveEdit") \
V(kMissingBytecodeArray, "Missing bytecode array from function") \
- V(kNativeFunctionLiteral, "Native function literal") \
- V(kNoCasesLeft, "No cases left") \
- V(kNonObject, "Non-object value") \
- V(kNotEnoughVirtualRegistersRegalloc, \
- "Not enough virtual registers (regalloc)") \
+ V(kObjectNotTagged, "The object is not tagged") \
+ V(kObjectTagged, "The object is tagged") \
V(kOffsetOutOfRange, "Offset out of range") \
+ V(kOperandIsASmi, "Operand is a smi") \
V(kOperandIsASmiAndNotABoundFunction, \
"Operand is a smi and not a bound function") \
V(kOperandIsASmiAndNotAFixedArray, "Operand is a smi and not a fixed array") \
V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
V(kOperandIsASmiAndNotAGeneratorObject, \
"Operand is a smi and not a generator object") \
- V(kOperandIsASmi, "Operand is a smi") \
V(kOperandIsNotABoundFunction, "Operand is not a bound function") \
V(kOperandIsNotAFixedArray, "Operand is not a fixed array") \
V(kOperandIsNotAFunction, "Operand is not a function") \
V(kOperandIsNotAGeneratorObject, "Operand is not a generator object") \
V(kOperandIsNotASmi, "Operand is not a smi") \
- V(kOperandIsNotSmi, "Operand is not smi") \
- V(kObjectTagged, "The object is tagged") \
- V(kObjectNotTagged, "The object is not tagged") \
- V(kOptimizationDisabled, "Optimization disabled") \
- V(kOptimizationDisabledForTest, "Optimization disabled for test") \
V(kReceivedInvalidReturnAddress, "Received invalid return address") \
- V(kReferenceToAVariableWhichRequiresDynamicLookup, \
- "Reference to a variable which requires dynamic lookup") \
- V(kReferenceToModuleVariable, "Reference to module-allocated variable") \
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kRegisterWasClobbered, "Register was clobbered") \
- V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
- V(kRestParameter, "Rest parameters") \
V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
- V(kSpreadCall, "Call with spread argument") \
+ V(kShouldNotDirectlyEnterOsrFunction, \
+ "Should not directly enter OSR-compiled function") \
V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
- V(kSuperReference, "Super reference") \
- V(kTailCall, "Tail call") \
V(kTheCurrentStackPointerIsBelowCsp, \
"The current stack pointer is below csp") \
V(kTheStackWasCorruptedByMacroAssemblerCall, \
"The stack was corrupted by MacroAssembler::Call()") \
- V(kTooManyParameters, "Too many parameters") \
- V(kTryCatchStatement, "TryCatchStatement") \
- V(kTryFinallyStatement, "TryFinallyStatement") \
- V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space") \
V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
- V(kUnexpectedColorFound, "Unexpected color bit pattern found") \
V(kUnexpectedElementsKindInArrayConstructor, \
"Unexpected ElementsKind in array constructor") \
- V(kUnexpectedFallthroughFromCharCodeAtSlowCase, \
- "Unexpected fallthrough from CharCodeAt slow case") \
- V(kUnexpectedFallThroughFromStringComparison, \
- "Unexpected fall-through from string comparison") \
- V(kUnexpectedFallthroughToCharCodeAtSlowCase, \
- "Unexpected fallthrough to CharCodeAt slow case") \
+ V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
+ V(kUnexpectedFunctionIDForInvokeIntrinsic, \
+ "Unexpected runtime function id for the InvokeIntrinsic bytecode") \
+ V(kUnexpectedInitialMapForArrayFunction, \
+ "Unexpected initial map for Array function") \
V(kUnexpectedInitialMapForArrayFunction1, \
"Unexpected initial map for Array function (1)") \
V(kUnexpectedInitialMapForArrayFunction2, \
"Unexpected initial map for Array function (2)") \
- V(kUnexpectedInitialMapForArrayFunction, \
- "Unexpected initial map for Array function") \
V(kUnexpectedInitialMapForInternalArrayFunction, \
"Unexpected initial map for InternalArray function") \
V(kUnexpectedLevelAfterReturnFromApiCall, \
"Unexpected level after return from api call") \
V(kUnexpectedNegativeValue, "Unexpected negative value") \
- V(kUnexpectedFunctionIDForInvokeIntrinsic, \
- "Unexpected runtime function id for the InvokeIntrinsic bytecode") \
- V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
- V(kUnexpectedStackDepth, "Unexpected operand stack depth in full-codegen") \
+ V(kUnexpectedReturnFromFrameDropper, \
+ "Unexpectedly returned from dropping frames") \
+ V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
+ V(kUnexpectedReturnFromWasmTrap, \
+ "Should not return after throwing a wasm trap") \
V(kUnexpectedStackPointer, "The stack pointer is not the expected value") \
- V(kUnexpectedStringType, "Unexpected string type") \
V(kUnexpectedValue, "Unexpected value") \
V(kUnsupportedModuleOperation, "Unsupported module operation") \
V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
- V(kUnexpectedReturnFromFrameDropper, \
- "Unexpectedly returned from dropping frames") \
- V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
- V(kVariableResolvedToWithContext, "Variable resolved to with context") \
- V(kWithStatement, "WithStatement") \
- V(kWrongFunctionContext, "Wrong context passed to function") \
V(kWrongAddressOrValuePassedToRecordWrite, \
"Wrong address or value passed to RecordWrite") \
V(kWrongArgumentCountForInvokeIntrinsic, \
"Wrong number of arguments for intrinsic") \
- V(kShouldNotDirectlyEnterOsrFunction, \
- "Should not directly enter OSR-compiled function") \
- V(kUnexpectedReturnFromWasmTrap, \
- "Should not return after throwing a wasm trap")
+ V(kWrongFunctionContext, "Wrong context passed to function")
+
+#define BAILOUT_MESSAGES_LIST(V) \
+ V(kNoReason, "no reason") \
+ \
+ V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change") \
+ V(kCodeGenerationFailed, "Code generation failed") \
+ V(kCyclicObjectStateDetectedInEscapeAnalysis, \
+ "Cyclic object state detected by escape analysis") \
+ V(kFunctionBeingDebugged, "Function is being debugged") \
+ V(kGraphBuildingFailed, "Optimized graph construction failed") \
+ V(kLiveEdit, "LiveEdit") \
+ V(kNativeFunctionLiteral, "Native function literal") \
+ V(kNotEnoughVirtualRegistersRegalloc, \
+ "Not enough virtual registers (regalloc)") \
+ V(kOptimizationDisabled, "Optimization disabled") \
+ V(kOptimizationDisabledForTest, "Optimization disabled for test")
#define ERROR_MESSAGES_CONSTANTS(C, T) C,
-enum BailoutReason {
- ERROR_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS) kLastErrorMessage
+enum class BailoutReason {
+ BAILOUT_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS) kLastErrorMessage
+};
+
+enum class AbortReason {
+ ABORT_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS) kLastErrorMessage
};
#undef ERROR_MESSAGES_CONSTANTS
const char* GetBailoutReason(BailoutReason reason);
+const char* GetAbortReason(AbortReason reason);
} // namespace internal
} // namespace v8
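
The BailoutReason/AbortReason split keeps the existing X-macro scheme: one list macro expands once into enum constants and once into a parallel message table, and because the enums are now scoped, indexing needs an explicit cast. A self-contained sketch of that scheme with made-up reason names:

    #include <cassert>
    #include <cstring>

    // One list macro, expanded twice, keeps enum constants and message
    // strings in sync by construction. The reasons here are invented.
    #define DEMO_MESSAGES_LIST(V)             \
      V(kNoReason, "no reason")               \
      V(kSomethingFailed, "something failed") \
      V(kBadInput, "bad input")

    #define DEMO_CONSTANTS(C, T) C,
    enum class DemoReason {
      DEMO_MESSAGES_LIST(DEMO_CONSTANTS) kLastErrorMessage
    };
    #undef DEMO_CONSTANTS

    const char* GetDemoReason(DemoReason reason) {
    #define DEMO_TEXTS(C, T) T,
      static const char* messages[] = {DEMO_MESSAGES_LIST(DEMO_TEXTS)};
    #undef DEMO_TEXTS
      // Scoped enum values do not convert implicitly, so index with a cast.
      return messages[static_cast<int>(reason)];
    }

    int main() {
      assert(std::strcmp(GetDemoReason(DemoReason::kBadInput), "bad input") == 0);
      assert(static_cast<int>(DemoReason::kLastErrorMessage) == 3);
    }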
diff --git a/deps/v8/src/base/DEPS b/deps/v8/src/base/DEPS
index 60db5959fd..a9c31c20d6 100644
--- a/deps/v8/src/base/DEPS
+++ b/deps/v8/src/base/DEPS
@@ -1,6 +1,7 @@
include_rules = [
"-include",
"+include/v8config.h",
+ "+include/v8-platform.h",
"-src",
"+src/base",
]
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index f449612e6a..22e0511dc7 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -356,12 +356,12 @@ CPU::CPU()
// Interpret CPU feature information.
if (num_ids > 0) {
__cpuid(cpu_info, 1);
- stepping_ = cpu_info[0] & 0xf;
- model_ = ((cpu_info[0] >> 4) & 0xf) + ((cpu_info[0] >> 12) & 0xf0);
- family_ = (cpu_info[0] >> 8) & 0xf;
+ stepping_ = cpu_info[0] & 0xF;
+ model_ = ((cpu_info[0] >> 4) & 0xF) + ((cpu_info[0] >> 12) & 0xF0);
+ family_ = (cpu_info[0] >> 8) & 0xF;
type_ = (cpu_info[0] >> 12) & 0x3;
- ext_model_ = (cpu_info[0] >> 16) & 0xf;
- ext_family_ = (cpu_info[0] >> 20) & 0xff;
+ ext_model_ = (cpu_info[0] >> 16) & 0xF;
+ ext_family_ = (cpu_info[0] >> 20) & 0xFF;
has_fpu_ = (cpu_info[3] & 0x00000001) != 0;
has_cmov_ = (cpu_info[3] & 0x00008000) != 0;
has_mmx_ = (cpu_info[3] & 0x00800000) != 0;
@@ -378,16 +378,16 @@ CPU::CPU()
if (family_ == 0x6) {
switch (model_) {
- case 0x1c: // SLT
+ case 0x1C: // SLT
case 0x26:
case 0x36:
case 0x27:
case 0x35:
case 0x37: // SLM
- case 0x4a:
- case 0x4d:
- case 0x4c: // AMT
- case 0x6e:
+ case 0x4A:
+ case 0x4D:
+ case 0x4C: // AMT
+ case 0x6E:
is_atom_ = true;
}
}
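
The cpu.cc hunk only recases hex literals, but the surrounding field extraction is worth spelling out. A sketch of the CPUID leaf-1 EAX decoding used there; the example signature 0x000306C3 is a typical Haswell value, included only for the assertions:

    #include <cassert>
    #include <cstdint>

    // Decode the CPUID leaf-1 EAX layout: stepping in bits 3:0, base model in
    // 7:4, base family in 11:8, extended model in 19:16, extended family in
    // 27:20. The model folds the extended model into the high nibble, exactly
    // as in the diff above.
    struct CpuSignature {
      int stepping, model, family, ext_model, ext_family;
    };

    CpuSignature DecodeLeaf1Eax(uint32_t eax) {
      CpuSignature s;
      s.stepping = eax & 0xF;
      s.model = ((eax >> 4) & 0xF) + ((eax >> 12) & 0xF0);
      s.family = (eax >> 8) & 0xF;
      s.ext_model = (eax >> 16) & 0xF;
      s.ext_family = (eax >> 20) & 0xFF;
      return s;
    }

    int main() {
      // Family 6, model 0x3C (extended model 3, base model 0xC), stepping 3.
      CpuSignature s = DecodeLeaf1Eax(0x000306C3);
      assert(s.family == 6 && s.model == 0x3C && s.stepping == 3);
      assert(s.ext_model == 3 && s.ext_family == 0);
    }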
diff --git a/deps/v8/src/base/debug/stack_trace_posix.cc b/deps/v8/src/base/debug/stack_trace_posix.cc
index 67f86c634f..ec3add1682 100644
--- a/deps/v8/src/base/debug/stack_trace_posix.cc
+++ b/deps/v8/src/base/debug/stack_trace_posix.cc
@@ -400,7 +400,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
if (n > sz) return nullptr;
if (base < 2 || base > 16) {
- buf[0] = '\000';
+ buf[0] = '\0';
return nullptr;
}
@@ -415,7 +415,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
// Make sure we can write the '-' character.
if (++n > sz) {
- buf[0] = '\000';
+ buf[0] = '\0';
return nullptr;
}
*start++ = '-';
@@ -427,7 +427,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
do {
// Make sure there is still enough space left in our output buffer.
if (++n > sz) {
- buf[0] = '\000';
+ buf[0] = '\0';
return nullptr;
}
@@ -439,7 +439,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
} while (j > 0 || padding > 0);
// Terminate the output with a NUL character.
- *ptr = '\000';
+ *ptr = '\0';
// Conversion to ASCII actually resulted in the digits being in reverse
// order. We can't easily generate them in forward order, as we can't tell
diff --git a/deps/v8/src/base/functional.cc b/deps/v8/src/base/functional.cc
index 80a7585bcc..dffb91f3cc 100644
--- a/deps/v8/src/base/functional.cc
+++ b/deps/v8/src/base/functional.cc
@@ -69,8 +69,8 @@ V8_INLINE size_t hash_value_unsigned(T v) {
// This code was taken from MurmurHash.
size_t hash_combine(size_t seed, size_t value) {
#if V8_HOST_ARCH_32_BIT
- const uint32_t c1 = 0xcc9e2d51;
- const uint32_t c2 = 0x1b873593;
+ const uint32_t c1 = 0xCC9E2D51;
+ const uint32_t c2 = 0x1B873593;
value *= c1;
value = bits::RotateRight32(value, 15);
@@ -78,9 +78,9 @@ size_t hash_combine(size_t seed, size_t value) {
seed ^= value;
seed = bits::RotateRight32(seed, 13);
- seed = seed * 5 + 0xe6546b64;
+ seed = seed * 5 + 0xE6546B64;
#else
- const uint64_t m = V8_UINT64_C(0xc6a4a7935bd1e995);
+ const uint64_t m = uint64_t{0xC6A4A7935BD1E995};
const uint32_t r = 47;
value *= m;
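
For context on the functional.cc hunk: hash_combine mixes the new value MurmurHash3-style before folding it into the seed. A sketch of the 32-bit path, assuming the step elided between the two hunks is the usual second multiply by c2; the rotate helper is a plain stand-in for v8::base::bits::RotateRight32:

    #include <cstdint>
    #include <iostream>

    static inline uint32_t RotateRight32(uint32_t v, int shift) {
      return (v >> shift) | (v << (32 - shift));
    }

    uint32_t HashCombine32(uint32_t seed, uint32_t value) {
      const uint32_t c1 = 0xCC9E2D51;
      const uint32_t c2 = 0x1B873593;

      value *= c1;
      value = RotateRight32(value, 15);
      value *= c2;  // second multiplicative mix of the incoming value

      seed ^= value;  // fold the mixed value into the running seed
      seed = RotateRight32(seed, 13);
      seed = seed * 5 + 0xE6546B64;
      return seed;
    }

    int main() {
      // Combining in a different order yields a different hash, as expected.
      std::cout << std::hex << HashCombine32(0, 1) << " "
                << HashCombine32(1, 0) << "\n";
    }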
diff --git a/deps/v8/src/base/ieee754.cc b/deps/v8/src/base/ieee754.cc
index ca0c6d1314..54f7e2e6aa 100644
--- a/deps/v8/src/base/ieee754.cc
+++ b/deps/v8/src/base/ieee754.cc
@@ -225,16 +225,16 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
z = 0;
GET_HIGH_WORD(hx, x); /* high word of x */
- ix = hx & 0x7fffffff;
- if (ix <= 0x3fe921fb) { /* |x| ~<= pi/4 , no need for reduction */
+ ix = hx & 0x7FFFFFFF;
+ if (ix <= 0x3FE921FB) { /* |x| ~<= pi/4 , no need for reduction */
y[0] = x;
y[1] = 0;
return 0;
}
- if (ix < 0x4002d97c) { /* |x| < 3pi/4, special case with n=+-1 */
+ if (ix < 0x4002D97C) { /* |x| < 3pi/4, special case with n=+-1 */
if (hx > 0) {
z = x - pio2_1;
- if (ix != 0x3ff921fb) { /* 33+53 bit pi is good enough */
+ if (ix != 0x3FF921FB) { /* 33+53 bit pi is good enough */
y[0] = z - pio2_1t;
y[1] = (z - y[0]) - pio2_1t;
} else { /* near pi/2, use 33+33+53 bit pi */
@@ -245,7 +245,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
return 1;
} else { /* negative x */
z = x + pio2_1;
- if (ix != 0x3ff921fb) { /* 33+53 bit pi is good enough */
+ if (ix != 0x3FF921FB) { /* 33+53 bit pi is good enough */
y[0] = z + pio2_1t;
y[1] = (z - y[0]) + pio2_1t;
} else { /* near pi/2, use 33+33+53 bit pi */
@@ -256,7 +256,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
return -1;
}
}
- if (ix <= 0x413921fb) { /* |x| ~<= 2^19*(pi/2), medium size */
+ if (ix <= 0x413921FB) { /* |x| ~<= 2^19*(pi/2), medium size */
t = fabs(x);
n = static_cast<int32_t>(t * invpio2 + half);
fn = static_cast<double>(n);
@@ -269,7 +269,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
j = ix >> 20;
y[0] = r - w;
GET_HIGH_WORD(high, y[0]);
- i = j - ((high >> 20) & 0x7ff);
+ i = j - ((high >> 20) & 0x7FF);
if (i > 16) { /* 2nd iteration needed, good to 118 */
t = r;
w = fn * pio2_2;
@@ -277,7 +277,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
w = fn * pio2_2t - ((t - r) - w);
y[0] = r - w;
GET_HIGH_WORD(high, y[0]);
- i = j - ((high >> 20) & 0x7ff);
+ i = j - ((high >> 20) & 0x7FF);
if (i > 49) { /* 3rd iteration need, 151 bits acc */
t = r; /* will cover all possible cases */
w = fn * pio2_3;
@@ -299,7 +299,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
/*
* all other (large) arguments
*/
- if (ix >= 0x7ff00000) { /* x is inf or NaN */
+ if (ix >= 0x7FF00000) { /* x is inf or NaN */
y[0] = y[1] = x - x;
return 0;
}
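
Most branches in this file compare the high 32 bits of the double's bit pattern against hex thresholds, which is what the recasing touches. A sketch of that GET_HIGH_WORD idiom, with helper names invented for this example:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // Grab the top 32 bits of the IEEE-754 double and compare the
    // sign-stripped value against hex thresholds: 0x7FF00000 and above means
    // inf or NaN; 0x3FE921FB is the high word of pi/4, the "no argument
    // reduction needed" cutoff used by sin/cos/tan.
    static uint32_t HighWord(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof bits);  // well-defined type pun
      return static_cast<uint32_t>(bits >> 32);
    }

    static bool IsInfOrNan(double x) {
      return (HighWord(x) & 0x7FFFFFFF) >= 0x7FF00000;
    }

    static bool NeedsArgumentReduction(double x) {
      return (HighWord(x) & 0x7FFFFFFF) > 0x3FE921FB;
    }

    int main() {
      assert(IsInfOrNan(INFINITY) && IsInfOrNan(NAN) && !IsInfOrNan(1e308));
      assert(!NeedsArgumentReduction(0.5));  // |x| <= ~pi/4
      assert(NeedsArgumentReduction(2.0));   // needs reduction mod pi/2
    }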
@@ -331,7 +331,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
*
* Algorithm
* 1. Since cos(-x) = cos(x), we need only to consider positive x.
- * 2. if x < 2^-27 (hx<0x3e400000 0), return 1 with inexact if x!=0.
+ * 2. if x < 2^-27 (hx<0x3E400000 0), return 1 with inexact if x!=0.
* 3. cos(x) is approximated by a polynomial of degree 14 on
* [0,pi/4]
* 4 14
@@ -370,8 +370,8 @@ V8_INLINE double __kernel_cos(double x, double y) {
double a, iz, z, r, qx;
int32_t ix;
GET_HIGH_WORD(ix, x);
- ix &= 0x7fffffff; /* ix = |x|'s high word*/
- if (ix < 0x3e400000) { /* if x < 2**27 */
+ ix &= 0x7FFFFFFF; /* ix = |x|'s high word*/
+ if (ix < 0x3E400000) { /* if x < 2**27 */
if (static_cast<int>(x) == 0) return one; /* generate inexact */
}
z = x * x;
@@ -379,7 +379,7 @@ V8_INLINE double __kernel_cos(double x, double y) {
if (ix < 0x3FD33333) { /* if |x| < 0.3 */
return one - (0.5 * z - (z * r - x * y));
} else {
- if (ix > 0x3fe90000) { /* x > 0.78125 */
+ if (ix > 0x3FE90000) { /* x > 0.78125 */
qx = 0.28125;
} else {
INSERT_WORDS(qx, ix - 0x00200000, 0); /* x/4 */
@@ -585,16 +585,16 @@ recompute:
iq[i] = 0x1000000 - j;
}
} else {
- iq[i] = 0xffffff - j;
+ iq[i] = 0xFFFFFF - j;
}
}
if (q0 > 0) { /* rare case: chance is 1 in 12 */
switch (q0) {
case 1:
- iq[jz - 1] &= 0x7fffff;
+ iq[jz - 1] &= 0x7FFFFF;
break;
case 2:
- iq[jz - 1] &= 0x3fffff;
+ iq[jz - 1] &= 0x3FFFFF;
break;
}
}
@@ -706,7 +706,7 @@ recompute:
*
* Algorithm
* 1. Since sin(-x) = -sin(x), we need only to consider positive x.
- * 2. if x < 2^-27 (hx<0x3e400000 0), return x with inexact if x!=0.
+ * 2. if x < 2^-27 (hx<0x3E400000 0), return x with inexact if x!=0.
* 3. sin(x) is approximated by a polynomial of degree 13 on
* [0,pi/4]
* 3 13
@@ -738,8 +738,8 @@ V8_INLINE double __kernel_sin(double x, double y, int iy) {
double z, r, v;
int32_t ix;
GET_HIGH_WORD(ix, x);
- ix &= 0x7fffffff; /* high word of x */
- if (ix < 0x3e400000) { /* |x| < 2**-27 */
+ ix &= 0x7FFFFFFF; /* high word of x */
+ if (ix < 0x3E400000) { /* |x| < 2**-27 */
if (static_cast<int>(x) == 0) return x;
} /* generate inexact */
z = x * x;
@@ -761,7 +761,7 @@ V8_INLINE double __kernel_sin(double x, double y, int iy) {
*
* Algorithm
* 1. Since tan(-x) = -tan(x), we need only to consider positive x.
- * 2. if x < 2^-28 (hx<0x3e300000 0), return x with inexact if x!=0.
+ * 2. if x < 2^-28 (hx<0x3E300000 0), return x with inexact if x!=0.
* 3. tan(x) is approximated by a odd polynomial of degree 27 on
* [0,0.67434]
* 3 27
@@ -813,8 +813,8 @@ double __kernel_tan(double x, double y, int iy) {
int32_t ix, hx;
GET_HIGH_WORD(hx, x); /* high word of x */
- ix = hx & 0x7fffffff; /* high word of |x| */
- if (ix < 0x3e300000) { /* x < 2**-28 */
+ ix = hx & 0x7FFFFFFF; /* high word of |x| */
+ if (ix < 0x3E300000) { /* x < 2**-28 */
if (static_cast<int>(x) == 0) { /* generate inexact */
uint32_t low;
GET_LOW_WORD(low, x);
@@ -934,11 +934,11 @@ double acos(double x) {
double z, p, q, r, w, s, c, df;
int32_t hx, ix;
GET_HIGH_WORD(hx, x);
- ix = hx & 0x7fffffff;
- if (ix >= 0x3ff00000) { /* |x| >= 1 */
+ ix = hx & 0x7FFFFFFF;
+ if (ix >= 0x3FF00000) { /* |x| >= 1 */
uint32_t lx;
GET_LOW_WORD(lx, x);
- if (((ix - 0x3ff00000) | lx) == 0) { /* |x|==1 */
+ if (((ix - 0x3FF00000) | lx) == 0) { /* |x|==1 */
if (hx > 0)
return 0.0; /* acos(1) = 0 */
else
@@ -946,8 +946,8 @@ double acos(double x) {
}
return (x - x) / (x - x); /* acos(|x|>1) is NaN */
}
- if (ix < 0x3fe00000) { /* |x| < 0.5 */
- if (ix <= 0x3c600000) return pio2_hi + pio2_lo; /*if|x|<2**-57*/
+ if (ix < 0x3FE00000) { /* |x| < 0.5 */
+ if (ix <= 0x3C600000) return pio2_hi + pio2_lo; /*if|x|<2**-57*/
z = x * x;
p = z * (pS0 + z * (pS1 + z * (pS2 + z * (pS3 + z * (pS4 + z * pS5)))));
q = one + z * (qS1 + z * (qS2 + z * (qS3 + z * qS4)));
@@ -996,15 +996,15 @@ double acosh(double x) {
int32_t hx;
uint32_t lx;
EXTRACT_WORDS(hx, lx, x);
- if (hx < 0x3ff00000) { /* x < 1 */
+ if (hx < 0x3FF00000) { /* x < 1 */
return (x - x) / (x - x);
- } else if (hx >= 0x41b00000) { /* x > 2**28 */
- if (hx >= 0x7ff00000) { /* x is inf of NaN */
+ } else if (hx >= 0x41B00000) { /* x > 2**28 */
+ if (hx >= 0x7FF00000) { /* x is inf of NaN */
return x + x;
} else {
return log(x) + ln2; /* acosh(huge)=log(2x) */
}
- } else if (((hx - 0x3ff00000) | lx) == 0) {
+ } else if (((hx - 0x3FF00000) | lx) == 0) {
return 0.0; /* acosh(1) = 0 */
} else if (hx > 0x40000000) { /* 2**28 > x > 2 */
t = x * x;
@@ -1067,15 +1067,15 @@ double asin(double x) {
t = 0;
GET_HIGH_WORD(hx, x);
- ix = hx & 0x7fffffff;
- if (ix >= 0x3ff00000) { /* |x|>= 1 */
+ ix = hx & 0x7FFFFFFF;
+ if (ix >= 0x3FF00000) { /* |x|>= 1 */
uint32_t lx;
GET_LOW_WORD(lx, x);
- if (((ix - 0x3ff00000) | lx) == 0) /* asin(1)=+-pi/2 with inexact */
+ if (((ix - 0x3FF00000) | lx) == 0) /* asin(1)=+-pi/2 with inexact */
return x * pio2_hi + x * pio2_lo;
return (x - x) / (x - x); /* asin(|x|>1) is NaN */
- } else if (ix < 0x3fe00000) { /* |x|<0.5 */
- if (ix < 0x3e400000) { /* if |x| < 2**-27 */
+ } else if (ix < 0x3FE00000) { /* |x|<0.5 */
+ if (ix < 0x3E400000) { /* if |x| < 2**-27 */
if (huge + x > one) return x; /* return x with inexact if x!=0*/
} else {
t = x * x;
@@ -1127,12 +1127,12 @@ double asinh(double x) {
double t, w;
int32_t hx, ix;
GET_HIGH_WORD(hx, x);
- ix = hx & 0x7fffffff;
- if (ix >= 0x7ff00000) return x + x; /* x is inf or NaN */
- if (ix < 0x3e300000) { /* |x|<2**-28 */
+ ix = hx & 0x7FFFFFFF;
+ if (ix >= 0x7FF00000) return x + x; /* x is inf or NaN */
+ if (ix < 0x3E300000) { /* |x|<2**-28 */
if (huge + x > one) return x; /* return x inexact except 0 */
}
- if (ix > 0x41b00000) { /* |x| > 2**28 */
+ if (ix > 0x41B00000) { /* |x| > 2**28 */
w = log(fabs(x)) + ln2;
} else if (ix > 0x40000000) { /* 2**28 > |x| > 2.0 */
t = fabs(x);
@@ -1202,26 +1202,26 @@ double atan(double x) {
int32_t ix, hx, id;
GET_HIGH_WORD(hx, x);
- ix = hx & 0x7fffffff;
+ ix = hx & 0x7FFFFFFF;
if (ix >= 0x44100000) { /* if |x| >= 2^66 */
uint32_t low;
GET_LOW_WORD(low, x);
- if (ix > 0x7ff00000 || (ix == 0x7ff00000 && (low != 0)))
+ if (ix > 0x7FF00000 || (ix == 0x7FF00000 && (low != 0)))
return x + x; /* NaN */
if (hx > 0)
return atanhi[3] + *(volatile double *)&atanlo[3];
else
return -atanhi[3] - *(volatile double *)&atanlo[3];
}
- if (ix < 0x3fdc0000) { /* |x| < 0.4375 */
- if (ix < 0x3e400000) { /* |x| < 2^-27 */
+ if (ix < 0x3FDC0000) { /* |x| < 0.4375 */
+ if (ix < 0x3E400000) { /* |x| < 2^-27 */
if (huge + x > one) return x; /* raise inexact */
}
id = -1;
} else {
x = fabs(x);
- if (ix < 0x3ff30000) { /* |x| < 1.1875 */
- if (ix < 0x3fe60000) { /* 7/16 <=|x|<11/16 */
+ if (ix < 0x3FF30000) { /* |x| < 1.1875 */
+ if (ix < 0x3FE60000) { /* 7/16 <=|x|<11/16 */
id = 0;
x = (2.0 * x - one) / (2.0 + x);
} else { /* 11/16<=|x|< 19/16 */
@@ -1294,14 +1294,14 @@ double atan2(double y, double x) {
uint32_t lx, ly;
EXTRACT_WORDS(hx, lx, x);
- ix = hx & 0x7fffffff;
+ ix = hx & 0x7FFFFFFF;
EXTRACT_WORDS(hy, ly, y);
- iy = hy & 0x7fffffff;
- if (((ix | ((lx | -static_cast<int32_t>(lx)) >> 31)) > 0x7ff00000) ||
- ((iy | ((ly | -static_cast<int32_t>(ly)) >> 31)) > 0x7ff00000)) {
+ iy = hy & 0x7FFFFFFF;
+ if (((ix | ((lx | -static_cast<int32_t>(lx)) >> 31)) > 0x7FF00000) ||
+ ((iy | ((ly | -static_cast<int32_t>(ly)) >> 31)) > 0x7FF00000)) {
return x + y; /* x or y is NaN */
}
- if (((hx - 0x3ff00000) | lx) == 0) return atan(y); /* x=1.0 */
+ if (((hx - 0x3FF00000) | lx) == 0) return atan(y); /* x=1.0 */
m = ((hy >> 31) & 1) | ((hx >> 30) & 2); /* 2*sign(x)+sign(y) */
/* when y = 0 */
@@ -1320,8 +1320,8 @@ double atan2(double y, double x) {
if ((ix | lx) == 0) return (hy < 0) ? -pi_o_2 - tiny : pi_o_2 + tiny;
/* when x is INF */
- if (ix == 0x7ff00000) {
- if (iy == 0x7ff00000) {
+ if (ix == 0x7FF00000) {
+ if (iy == 0x7FF00000) {
switch (m) {
case 0:
return pi_o_4 + tiny; /* atan(+INF,+INF) */
@@ -1346,7 +1346,7 @@ double atan2(double y, double x) {
}
}
/* when y is INF */
- if (iy == 0x7ff00000) return (hy < 0) ? -pi_o_2 - tiny : pi_o_2 + tiny;
+ if (iy == 0x7FF00000) return (hy < 0) ? -pi_o_2 - tiny : pi_o_2 + tiny;
/* compute y/x */
k = (iy - ix) >> 20;
@@ -1408,10 +1408,10 @@ double cos(double x) {
GET_HIGH_WORD(ix, x);
/* |x| ~< pi/4 */
- ix &= 0x7fffffff;
- if (ix <= 0x3fe921fb) {
+ ix &= 0x7FFFFFFF;
+ if (ix <= 0x3FE921FB) {
return __kernel_cos(x, z);
- } else if (ix >= 0x7ff00000) {
+ } else if (ix >= 0x7FF00000) {
/* cos(Inf or NaN) is NaN */
return x - x;
} else {
@@ -1497,18 +1497,18 @@ double exp(double x) {
one = 1.0,
halF[2] = {0.5, -0.5},
o_threshold = 7.09782712893383973096e+02, /* 0x40862E42, 0xFEFA39EF */
- u_threshold = -7.45133219101941108420e+02, /* 0xc0874910, 0xD52D3051 */
- ln2HI[2] = {6.93147180369123816490e-01, /* 0x3fe62e42, 0xfee00000 */
- -6.93147180369123816490e-01}, /* 0xbfe62e42, 0xfee00000 */
- ln2LO[2] = {1.90821492927058770002e-10, /* 0x3dea39ef, 0x35793c76 */
- -1.90821492927058770002e-10}, /* 0xbdea39ef, 0x35793c76 */
- invln2 = 1.44269504088896338700e+00, /* 0x3ff71547, 0x652b82fe */
+ u_threshold = -7.45133219101941108420e+02, /* 0xC0874910, 0xD52D3051 */
+ ln2HI[2] = {6.93147180369123816490e-01, /* 0x3FE62E42, 0xFEE00000 */
+ -6.93147180369123816490e-01}, /* 0xBFE62E42, 0xFEE00000 */
+ ln2LO[2] = {1.90821492927058770002e-10, /* 0x3DEA39EF, 0x35793C76 */
+ -1.90821492927058770002e-10}, /* 0xBDEA39EF, 0x35793C76 */
+ invln2 = 1.44269504088896338700e+00, /* 0x3FF71547, 0x652B82FE */
P1 = 1.66666666666666019037e-01, /* 0x3FC55555, 0x5555553E */
P2 = -2.77777777770155933842e-03, /* 0xBF66C16C, 0x16BEBD93 */
P3 = 6.61375632143793436117e-05, /* 0x3F11566A, 0xAF25DE2C */
P4 = -1.65339022054652515390e-06, /* 0xBEBBBD41, 0xC5D26BF1 */
P5 = 4.13813679705723846039e-08, /* 0x3E663769, 0x72BEA4D0 */
- E = 2.718281828459045; /* 0x4005bf0a, 0x8b145769 */
+ E = 2.718281828459045; /* 0x4005BF0A, 0x8B145769 */
static volatile double
huge = 1.0e+300,
@@ -1521,14 +1521,14 @@ double exp(double x) {
GET_HIGH_WORD(hx, x);
xsb = (hx >> 31) & 1; /* sign bit of x */
- hx &= 0x7fffffff; /* high word of |x| */
+ hx &= 0x7FFFFFFF; /* high word of |x| */
/* filter out non-finite argument */
if (hx >= 0x40862E42) { /* if |x|>=709.78... */
- if (hx >= 0x7ff00000) {
+ if (hx >= 0x7FF00000) {
uint32_t lx;
GET_LOW_WORD(lx, x);
- if (((hx & 0xfffff) | lx) != 0)
+ if (((hx & 0xFFFFF) | lx) != 0)
return x + x; /* NaN */
else
return (xsb == 0) ? x : 0.0; /* exp(+-inf)={inf,0} */
@@ -1538,7 +1538,7 @@ double exp(double x) {
}
/* argument reduction */
- if (hx > 0x3fd62e42) { /* if |x| > 0.5 ln2 */
+ if (hx > 0x3FD62E42) { /* if |x| > 0.5 ln2 */
if (hx < 0x3FF0A2B2) { /* and |x| < 1.5 ln2 */
/* TODO(rtoy): We special case exp(1) here to return the correct
* value of E, as the computation below would get the last bit
@@ -1555,7 +1555,7 @@ double exp(double x) {
lo = t * ln2LO[0];
}
STRICT_ASSIGN(double, x, hi - lo);
- } else if (hx < 0x3e300000) { /* when |x|<2**-28 */
+ } else if (hx < 0x3E300000) { /* when |x|<2**-28 */
if (huge + x > one) return one + x; /* trigger inexact */
} else {
k = 0;
@@ -1564,9 +1564,9 @@ double exp(double x) {
/* x is now in primary range */
t = x * x;
if (k >= -1021) {
- INSERT_WORDS(twopk, 0x3ff00000 + (k << 20), 0);
+ INSERT_WORDS(twopk, 0x3FF00000 + (k << 20), 0);
} else {
- INSERT_WORDS(twopk, 0x3ff00000 + ((k + 1000) << 20), 0);
+ INSERT_WORDS(twopk, 0x3FF00000 + ((k + 1000) << 20), 0);
}
c = x - t * (P1 + t * (P2 + t * (P3 + t * (P4 + t * P5))));
if (k == 0) {
@@ -1607,13 +1607,13 @@ double atanh(double x) {
int32_t hx, ix;
uint32_t lx;
EXTRACT_WORDS(hx, lx, x);
- ix = hx & 0x7fffffff;
- if ((ix | ((lx | -static_cast<int32_t>(lx)) >> 31)) > 0x3ff00000) /* |x|>1 */
+ ix = hx & 0x7FFFFFFF;
+ if ((ix | ((lx | -static_cast<int32_t>(lx)) >> 31)) > 0x3FF00000) /* |x|>1 */
return (x - x) / (x - x);
- if (ix == 0x3ff00000) return x / zero;
- if (ix < 0x3e300000 && (huge + x) > zero) return x; /* x<2**-28 */
+ if (ix == 0x3FF00000) return x / zero;
+ if (ix < 0x3E300000 && (huge + x) > zero) return x; /* x<2**-28 */
SET_HIGH_WORD(x, ix);
- if (ix < 0x3fe00000) { /* x < 0.5 */
+ if (ix < 0x3FE00000) { /* x < 0.5 */
t = x + x;
t = 0.5 * log1p(t + t * x / (one - x));
} else {
@@ -1699,21 +1699,21 @@ double log(double x) {
k = 0;
if (hx < 0x00100000) { /* x < 2**-1022 */
- if (((hx & 0x7fffffff) | lx) == 0)
+ if (((hx & 0x7FFFFFFF) | lx) == 0)
return -two54 / vzero; /* log(+-0)=-inf */
if (hx < 0) return (x - x) / zero; /* log(-#) = NaN */
k -= 54;
x *= two54; /* subnormal number, scale up x */
GET_HIGH_WORD(hx, x);
}
- if (hx >= 0x7ff00000) return x + x;
+ if (hx >= 0x7FF00000) return x + x;
k += (hx >> 20) - 1023;
- hx &= 0x000fffff;
- i = (hx + 0x95f64) & 0x100000;
- SET_HIGH_WORD(x, hx | (i ^ 0x3ff00000)); /* normalize x or x/2 */
+ hx &= 0x000FFFFF;
+ i = (hx + 0x95F64) & 0x100000;
+ SET_HIGH_WORD(x, hx | (i ^ 0x3FF00000)); /* normalize x or x/2 */
k += (i >> 20);
f = x - 1.0;
- if ((0x000fffff & (2 + hx)) < 3) { /* -2**-20 <= f < 2**-20 */
+ if ((0x000FFFFF & (2 + hx)) < 3) { /* -2**-20 <= f < 2**-20 */
if (f == zero) {
if (k == 0) {
return zero;
@@ -1733,9 +1733,9 @@ double log(double x) {
s = f / (2.0 + f);
dk = static_cast<double>(k);
z = s * s;
- i = hx - 0x6147a;
+ i = hx - 0x6147A;
w = z * z;
- j = 0x6b851 - hx;
+ j = 0x6B851 - hx;
t1 = w * (Lg2 + w * (Lg4 + w * Lg6));
t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7)));
i |= j;
@@ -1838,30 +1838,30 @@ double log1p(double x) {
int32_t k, hx, hu, ax;
GET_HIGH_WORD(hx, x);
- ax = hx & 0x7fffffff;
+ ax = hx & 0x7FFFFFFF;
k = 1;
if (hx < 0x3FDA827A) { /* 1+x < sqrt(2)+ */
- if (ax >= 0x3ff00000) { /* x <= -1.0 */
+ if (ax >= 0x3FF00000) { /* x <= -1.0 */
if (x == -1.0)
return -two54 / vzero; /* log1p(-1)=+inf */
else
return (x - x) / (x - x); /* log1p(x<-1)=NaN */
}
- if (ax < 0x3e200000) { /* |x| < 2**-29 */
+ if (ax < 0x3E200000) { /* |x| < 2**-29 */
if (two54 + x > zero /* raise inexact */
- && ax < 0x3c900000) /* |x| < 2**-54 */
+ && ax < 0x3C900000) /* |x| < 2**-54 */
return x;
else
return x - x * x * 0.5;
}
- if (hx > 0 || hx <= static_cast<int32_t>(0xbfd2bec4)) {
+ if (hx > 0 || hx <= static_cast<int32_t>(0xBFD2BEC4)) {
k = 0;
f = x;
hu = 1;
} /* sqrt(2)/2- <= 1+x < sqrt(2)+ */
}
- if (hx >= 0x7ff00000) return x + x;
+ if (hx >= 0x7FF00000) return x + x;
if (k != 0) {
if (hx < 0x43400000) {
STRICT_ASSIGN(double, u, 1.0 + x);
@@ -1875,7 +1875,7 @@ double log1p(double x) {
k = (hu >> 20) - 1023;
c = 0;
}
- hu &= 0x000fffff;
+ hu &= 0x000FFFFF;
/*
* The approximation to sqrt(2) used in thresholds is not
* critical. However, the ones used above must give less
@@ -1883,11 +1883,11 @@ double log1p(double x) {
* never reached from here, since here we have committed to
* using the correction term but don't use it if k==0.
*/
- if (hu < 0x6a09e) { /* u ~< sqrt(2) */
- SET_HIGH_WORD(u, hu | 0x3ff00000); /* normalize u */
+ if (hu < 0x6A09E) { /* u ~< sqrt(2) */
+ SET_HIGH_WORD(u, hu | 0x3FF00000); /* normalize u */
} else {
k += 1;
- SET_HIGH_WORD(u, hu | 0x3fe00000); /* normalize u/2 */
+ SET_HIGH_WORD(u, hu | 0x3FE00000); /* normalize u/2 */
hu = (0x00100000 - hu) >> 2;
}
f = u - 1.0;
@@ -2012,8 +2012,8 @@ static inline double k_log1p(double f) {
double log2(double x) {
static const double
two54 = 1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */
- ivln2hi = 1.44269504072144627571e+00, /* 0x3ff71547, 0x65200000 */
- ivln2lo = 1.67517131648865118353e-10; /* 0x3de705fc, 0x2eefa200 */
+ ivln2hi = 1.44269504072144627571e+00, /* 0x3FF71547, 0x65200000 */
+ ivln2lo = 1.67517131648865118353e-10; /* 0x3DE705FC, 0x2EEFA200 */
static const double zero = 0.0;
static volatile double vzero = 0.0;
@@ -2026,19 +2026,19 @@ double log2(double x) {
k = 0;
if (hx < 0x00100000) { /* x < 2**-1022 */
- if (((hx & 0x7fffffff) | lx) == 0)
+ if (((hx & 0x7FFFFFFF) | lx) == 0)
return -two54 / vzero; /* log(+-0)=-inf */
if (hx < 0) return (x - x) / zero; /* log(-#) = NaN */
k -= 54;
x *= two54; /* subnormal number, scale up x */
GET_HIGH_WORD(hx, x);
}
- if (hx >= 0x7ff00000) return x + x;
- if (hx == 0x3ff00000 && lx == 0) return zero; /* log(1) = +0 */
+ if (hx >= 0x7FF00000) return x + x;
+ if (hx == 0x3FF00000 && lx == 0) return zero; /* log(1) = +0 */
k += (hx >> 20) - 1023;
- hx &= 0x000fffff;
- i = (hx + 0x95f64) & 0x100000;
- SET_HIGH_WORD(x, hx | (i ^ 0x3ff00000)); /* normalize x or x/2 */
+ hx &= 0x000FFFFF;
+ i = (hx + 0x95F64) & 0x100000;
+ SET_HIGH_WORD(x, hx | (i ^ 0x3FF00000)); /* normalize x or x/2 */
k += (i >> 20);
y = static_cast<double>(k);
f = x - 1.0;
@@ -2133,7 +2133,7 @@ double log10(double x) {
k = 0;
if (hx < 0x00100000) { /* x < 2**-1022 */
- if (((hx & 0x7fffffff) | lx) == 0)
+ if (((hx & 0x7FFFFFFF) | lx) == 0)
return -two54 / vzero; /* log(+-0)=-inf */
if (hx < 0) return (x - x) / zero; /* log(-#) = NaN */
k -= 54;
@@ -2141,12 +2141,12 @@ double log10(double x) {
GET_HIGH_WORD(hx, x);
GET_LOW_WORD(lx, x);
}
- if (hx >= 0x7ff00000) return x + x;
- if (hx == 0x3ff00000 && lx == 0) return zero; /* log(1) = +0 */
+ if (hx >= 0x7FF00000) return x + x;
+ if (hx == 0x3FF00000 && lx == 0) return zero; /* log(1) = +0 */
k += (hx >> 20) - 1023;
i = (k & 0x80000000) >> 31;
- hx = (hx & 0x000fffff) | ((0x3ff - i) << 20);
+ hx = (hx & 0x000FFFFF) | ((0x3FF - i) << 20);
y = k + i;
SET_HIGH_WORD(x, hx);
SET_LOW_WORD(x, lx);
@@ -2254,9 +2254,9 @@ double expm1(double x) {
one = 1.0,
tiny = 1.0e-300,
o_threshold = 7.09782712893383973096e+02, /* 0x40862E42, 0xFEFA39EF */
- ln2_hi = 6.93147180369123816490e-01, /* 0x3fe62e42, 0xfee00000 */
- ln2_lo = 1.90821492927058770002e-10, /* 0x3dea39ef, 0x35793c76 */
- invln2 = 1.44269504088896338700e+00, /* 0x3ff71547, 0x652b82fe */
+ ln2_hi = 6.93147180369123816490e-01, /* 0x3FE62E42, 0xFEE00000 */
+ ln2_lo = 1.90821492927058770002e-10, /* 0x3DEA39EF, 0x35793C76 */
+ invln2 = 1.44269504088896338700e+00, /* 0x3FF71547, 0x652B82FE */
/* Scaled Q's: Qn_here = 2**n * Qn_above, for R(2*z) where z = hxs =
x*x/2: */
Q1 = -3.33333333333331316428e-02, /* BFA11111 111110F4 */
@@ -2273,15 +2273,15 @@ double expm1(double x) {
GET_HIGH_WORD(hx, x);
xsb = hx & 0x80000000; /* sign bit of x */
- hx &= 0x7fffffff; /* high word of |x| */
+ hx &= 0x7FFFFFFF; /* high word of |x| */
/* filter out huge and non-finite argument */
if (hx >= 0x4043687A) { /* if |x|>=56*ln2 */
if (hx >= 0x40862E42) { /* if |x|>=709.78... */
- if (hx >= 0x7ff00000) {
+ if (hx >= 0x7FF00000) {
uint32_t low;
GET_LOW_WORD(low, x);
- if (((hx & 0xfffff) | low) != 0)
+ if (((hx & 0xFFFFF) | low) != 0)
return x + x; /* NaN */
else
return (xsb == 0) ? x : -1.0; /* exp(+-inf)={inf,-1} */
@@ -2295,7 +2295,7 @@ double expm1(double x) {
}
/* argument reduction */
- if (hx > 0x3fd62e42) { /* if |x| > 0.5 ln2 */
+ if (hx > 0x3FD62E42) { /* if |x| > 0.5 ln2 */
if (hx < 0x3FF0A2B2) { /* and |x| < 1.5 ln2 */
if (xsb == 0) {
hi = x - ln2_hi;
@@ -2314,7 +2314,7 @@ double expm1(double x) {
}
STRICT_ASSIGN(double, x, hi - lo);
c = (hi - x) - lo;
- } else if (hx < 0x3c900000) { /* when |x|<2**-54, return x */
+ } else if (hx < 0x3C900000) { /* when |x|<2**-54, return x */
t = huge + x; /* return x with inexact flags when x!=0 */
return x - (t - (huge + x));
} else {
@@ -2330,7 +2330,7 @@ double expm1(double x) {
if (k == 0) {
return x - (x * e - hxs); /* c is 0 */
} else {
- INSERT_WORDS(twopk, 0x3ff00000 + (k << 20), 0); /* 2^k */
+ INSERT_WORDS(twopk, 0x3FF00000 + (k << 20), 0); /* 2^k */
e = (x * (e - c) - c);
e -= hxs;
if (k == -1) return 0.5 * (x - e) - 0.5;
@@ -2353,11 +2353,11 @@ double expm1(double x) {
}
t = one;
if (k < 20) {
- SET_HIGH_WORD(t, 0x3ff00000 - (0x200000 >> k)); /* t=1-2^-k */
+ SET_HIGH_WORD(t, 0x3FF00000 - (0x200000 >> k)); /* t=1-2^-k */
y = t - (e - x);
y = y * twopk;
} else {
- SET_HIGH_WORD(t, ((0x3ff - k) << 20)); /* 2^-k */
+ SET_HIGH_WORD(t, ((0x3FF - k) << 20)); /* 2^-k */
y = x - (e + t);
y += one;
y = y * twopk;
@@ -2372,11 +2372,11 @@ double cbrt(double x) {
B2 = 696219795; /* B2 = (1023-1023/3-54/3-0.03306235651)*2**20 */
/* |1/cbrt(x) - p(x)| < 2**-23.5 (~[-7.93e-8, 7.929e-8]). */
- static const double P0 = 1.87595182427177009643, /* 0x3ffe03e6, 0x0f61e692 */
- P1 = -1.88497979543377169875, /* 0xbffe28e0, 0x92f02420 */
- P2 = 1.621429720105354466140, /* 0x3ff9f160, 0x4a49d6c2 */
- P3 = -0.758397934778766047437, /* 0xbfe844cb, 0xbee751d9 */
- P4 = 0.145996192886612446982; /* 0x3fc2b000, 0xd4e4edd7 */
+ static const double P0 = 1.87595182427177009643, /* 0x3FFE03E6, 0x0F61E692 */
+ P1 = -1.88497979543377169875, /* 0xBFFE28E0, 0x92F02420 */
+ P2 = 1.621429720105354466140, /* 0x3FF9F160, 0x4A49D6C2 */
+ P3 = -0.758397934778766047437, /* 0xBFE844CB, 0xBEE751D9 */
+ P4 = 0.145996192886612446982; /* 0x3FC2B000, 0xD4E4EDD7 */
int32_t hx;
union {
@@ -2390,7 +2390,7 @@ double cbrt(double x) {
EXTRACT_WORDS(hx, low, x);
sign = hx & 0x80000000; /* sign= sign(x) */
hx ^= sign;
- if (hx >= 0x7ff00000) return (x + x); /* cbrt(NaN,INF) is itself */
+ if (hx >= 0x7FF00000) return (x + x); /* cbrt(NaN,INF) is itself */
/*
* Rough cbrt to 5 bits:
@@ -2412,7 +2412,7 @@ double cbrt(double x) {
SET_HIGH_WORD(t, 0x43500000); /* set t= 2**54 */
t *= x;
GET_HIGH_WORD(high, t);
- INSERT_WORDS(t, sign | ((high & 0x7fffffff) / 3 + B2), 0);
+ INSERT_WORDS(t, sign | ((high & 0x7FFFFFFF) / 3 + B2), 0);
} else {
INSERT_WORDS(t, sign | (hx / 3 + B1), 0);
}
@@ -2441,7 +2441,7 @@ double cbrt(double x) {
* before the final error is larger than 0.667 ulps.
*/
u.value = t;
- u.bits = (u.bits + 0x80000000) & 0xffffffffc0000000ULL;
+ u.bits = (u.bits + 0x80000000) & 0xFFFFFFFFC0000000ULL;
t = u.value;
/* one step Newton iteration to 53 bits with error < 0.667 ulps */
@@ -2492,10 +2492,10 @@ double sin(double x) {
GET_HIGH_WORD(ix, x);
/* |x| ~< pi/4 */
- ix &= 0x7fffffff;
- if (ix <= 0x3fe921fb) {
+ ix &= 0x7FFFFFFF;
+ if (ix <= 0x3FE921FB) {
return __kernel_sin(x, z, 0);
- } else if (ix >= 0x7ff00000) {
+ } else if (ix >= 0x7FF00000) {
/* sin(Inf or NaN) is NaN */
return x - x;
} else {
@@ -2551,10 +2551,10 @@ double tan(double x) {
GET_HIGH_WORD(ix, x);
/* |x| ~< pi/4 */
- ix &= 0x7fffffff;
- if (ix <= 0x3fe921fb) {
+ ix &= 0x7FFFFFFF;
+ if (ix <= 0x3FE921FB) {
return __kernel_tan(x, z, 1);
- } else if (ix >= 0x7ff00000) {
+ } else if (ix >= 0x7FF00000) {
/* tan(Inf or NaN) is NaN */
return x - x; /* NaN */
} else {
@@ -2596,14 +2596,14 @@ double cosh(double x) {
/* High word of |x|. */
GET_HIGH_WORD(ix, x);
- ix &= 0x7fffffff;
+ ix &= 0x7FFFFFFF;
// |x| in [0,0.5*log2], return 1+expm1(|x|)^2/(2*exp(|x|))
- if (ix < 0x3fd62e43) {
+ if (ix < 0x3FD62E43) {
double t = expm1(fabs(x));
double w = one + t;
// For |x| < 2^-55, cosh(x) = 1
- if (ix < 0x3c800000) return w;
+ if (ix < 0x3C800000) return w;
return one + (t * t) / (w + w);
}
@@ -2614,7 +2614,7 @@ double cosh(double x) {
}
// |x| in [22, log(maxdouble)], return half*exp(|x|)
- if (ix < 0x40862e42) return half * exp(fabs(x));
+ if (ix < 0x40862E42) return half * exp(fabs(x));
// |x| in [log(maxdouble), overflowthreshold]
if (fabs(x) <= KCOSH_OVERFLOW) {
@@ -2624,7 +2624,7 @@ double cosh(double x) {
}
/* x is INF or NaN */
- if (ix >= 0x7ff00000) return x * x;
+ if (ix >= 0x7FF00000) return x * x;
// |x| > overflowthreshold.
return huge * huge;
@@ -2653,7 +2653,7 @@ double sinh(double x) {
static const double KSINH_OVERFLOW = 710.4758600739439,
TWO_M28 =
3.725290298461914e-9, // 2^-28, empty lower half
- LOG_MAXD = 709.7822265625; // 0x40862e42 00000000, empty lower half
+ LOG_MAXD = 709.7822265625; // 0x40862E42 00000000, empty lower half
static const double shuge = 1.0e307;
double h = (x < 0) ? -0.5 : 0.5;
@@ -2712,10 +2712,10 @@ double tanh(double x) {
int32_t jx, ix;
GET_HIGH_WORD(jx, x);
- ix = jx & 0x7fffffff;
+ ix = jx & 0x7FFFFFFF;
/* x is INF or NaN */
- if (ix >= 0x7ff00000) {
+ if (ix >= 0x7FF00000) {
if (jx >= 0)
return one / x + one; /* tanh(+-inf)=+-1 */
else
@@ -2724,10 +2724,10 @@ double tanh(double x) {
/* |x| < 22 */
if (ix < 0x40360000) { /* |x|<22 */
- if (ix < 0x3e300000) { /* |x|<2**-28 */
+ if (ix < 0x3E300000) { /* |x|<2**-28 */
if (huge + x > one) return x; /* tanh(tiny) = tiny with inexact */
}
- if (ix >= 0x3ff00000) { /* |x|>=1 */
+ if (ix >= 0x3FF00000) { /* |x|>=1 */
t = expm1(two * fabs(x));
z = one - two / (t + two);
} else {
diff --git a/deps/v8/src/base/lazy-instance.h b/deps/v8/src/base/lazy-instance.h
index 92f9b309a7..e965382b8d 100644
--- a/deps/v8/src/base/lazy-instance.h
+++ b/deps/v8/src/base/lazy-instance.h
@@ -168,17 +168,13 @@ struct LazyInstanceImpl {
typedef typename AllocationTrait::StorageType StorageType;
private:
- static void InitInstance(StorageType* storage) {
- AllocationTrait::template InitStorageUsingTrait<CreateTrait>(storage);
+ static void InitInstance(void* storage) {
+ AllocationTrait::template InitStorageUsingTrait<CreateTrait>(
+ static_cast<StorageType*>(storage));
}
void Init() const {
- InitOnceTrait::Init(
- &once_,
- // Casts to void* are needed here to avoid breaking strict aliasing
- // rules.
- reinterpret_cast<void(*)(void*)>(&InitInstance), // NOLINT
- reinterpret_cast<void*>(&storage_));
+ InitOnceTrait::Init(&once_, &InitInstance, static_cast<void*>(&storage_));
}
public:
diff --git a/deps/v8/src/base/logging.cc b/deps/v8/src/base/logging.cc
index 13fbec0e90..ad5349ac7e 100644
--- a/deps/v8/src/base/logging.cc
+++ b/deps/v8/src/base/logging.cc
@@ -119,8 +119,6 @@ DEFINE_CHECK_OP_IMPL(GT)
} // namespace base
} // namespace v8
-
-// Contains protection against recursive calls (faults while handling faults).
void V8_Fatal(const char* file, int line, const char* format, ...) {
fflush(stdout);
fflush(stderr);
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index 9f3a1e6991..5275fdc6a6 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -20,23 +20,13 @@
V8_BASE_EXPORT V8_NOINLINE void V8_Dcheck(const char* file, int line,
const char* message);
-// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
-// development, but they should not be relied on in the final product.
#ifdef DEBUG
-#define FATAL(msg) \
- V8_Fatal(__FILE__, __LINE__, "%s", (msg))
-#define UNIMPLEMENTED() \
- V8_Fatal(__FILE__, __LINE__, "unimplemented code")
-#define UNREACHABLE() \
- V8_Fatal(__FILE__, __LINE__, "unreachable code")
+#define FATAL(...) V8_Fatal(__FILE__, __LINE__, __VA_ARGS__)
#else
-#define FATAL(msg) \
- V8_Fatal("", 0, "%s", (msg))
-#define UNIMPLEMENTED() \
- V8_Fatal("", 0, "unimplemented code")
-#define UNREACHABLE() V8_Fatal("", 0, "unreachable code")
+#define FATAL(...) V8_Fatal("", 0, __VA_ARGS__)
#endif
-
+#define UNIMPLEMENTED() FATAL("unimplemented code")
+#define UNREACHABLE() FATAL("unreachable code")
namespace v8 {
namespace base {
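With FATAL now variadic, a call site can hand a printf-style format plus arguments straight through to V8_Fatal. A minimal sketch of such a call site follows; the function name, the format string, and the checked value are illustrative, not taken from this patch:

    #include "src/base/logging.h"

    // Hypothetical caller of the new variadic FATAL macro.
    void CheckInstanceType(int instance_type) {
      if (instance_type < 0) {
        FATAL("unexpected instance type %d", instance_type);  // printf-style args
      }
    }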
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 5aa8eff68d..a265408d91 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -5,6 +5,8 @@
#ifndef V8_BASE_MACROS_H_
#define V8_BASE_MACROS_H_
+#include <limits>
+
#include "src/base/compiler-specific.h"
#include "src/base/format-macros.h"
#include "src/base/logging.h"
@@ -167,18 +169,23 @@ V8_INLINE Dest bit_cast(Source const& source) {
#define DISABLE_ASAN
#endif
-// DISABLE_CFI_PERF -- Disable Control Flow Integrity checks for Perf reasons.
-#if !defined(DISABLE_CFI_PERF)
+// Helper macro to define no_sanitize attributes only with clang.
#if defined(__clang__) && defined(__has_attribute)
#if __has_attribute(no_sanitize)
-#define DISABLE_CFI_PERF __attribute__((no_sanitize("cfi")))
-#endif
+#define CLANG_NO_SANITIZE(what) __attribute__((no_sanitize(what)))
#endif
#endif
-#if !defined(DISABLE_CFI_PERF)
-#define DISABLE_CFI_PERF
+#if !defined(CLANG_NO_SANITIZE)
+#define CLANG_NO_SANITIZE(what)
#endif
+// DISABLE_CFI_PERF -- Disable Control Flow Integrity checks for Perf reasons.
+#define DISABLE_CFI_PERF CLANG_NO_SANITIZE("cfi")
+
+// DISABLE_CFI_ICALL -- Disable Control Flow Integrity indirect call checks,
+// useful because calls into JITed code cannot be CFI verified.
+#define DISABLE_CFI_ICALL CLANG_NO_SANITIZE("cfi-icall")
+
#if V8_CC_GNU
#define V8_IMMEDIATE_CRASH() __builtin_trap()
#else
@@ -214,34 +221,16 @@ struct Use {
// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
// works on compilers that don't have it (like MSVC).
#if V8_CC_MSVC
-# define V8_UINT64_C(x) (x ## UI64)
-# define V8_INT64_C(x) (x ## I64)
# if V8_HOST_ARCH_64_BIT
-# define V8_INTPTR_C(x) (x ## I64)
# define V8_PTR_PREFIX "ll"
# else
-# define V8_INTPTR_C(x) (x)
# define V8_PTR_PREFIX ""
# endif // V8_HOST_ARCH_64_BIT
#elif V8_CC_MINGW64
-# define V8_UINT64_C(x) (x ## ULL)
-# define V8_INT64_C(x) (x ## LL)
-# define V8_INTPTR_C(x) (x ## LL)
# define V8_PTR_PREFIX "I64"
#elif V8_HOST_ARCH_64_BIT
-# if V8_OS_MACOSX || V8_OS_OPENBSD
-# define V8_UINT64_C(x) (x ## ULL)
-# define V8_INT64_C(x) (x ## LL)
-# else
-# define V8_UINT64_C(x) (x ## UL)
-# define V8_INT64_C(x) (x ## L)
-# endif
-# define V8_INTPTR_C(x) (x ## L)
# define V8_PTR_PREFIX "l"
#else
-# define V8_UINT64_C(x) (x ## ULL)
-# define V8_INT64_C(x) (x ## LL)
-# define V8_INTPTR_C(x) (x)
#if V8_OS_AIX
#define V8_PTR_PREFIX "l"
#else
@@ -329,4 +318,24 @@ inline void* AlignedAddress(void* address, size_t alignment) {
~static_cast<uintptr_t>(alignment - 1));
}
+// Bounds checks for float to integer conversions, which truncate. Hence, the
+// range of legal values is (min - 1, max + 1).
+template <typename int_t, typename float_t, typename biggest_int_t = int64_t>
+bool is_inbounds(float_t v) {
+ static_assert(sizeof(int_t) < sizeof(biggest_int_t),
+ "int_t can't be bounds checked by the compiler");
+ constexpr float_t kLowerBound =
+ static_cast<float_t>(std::numeric_limits<int_t>::min()) - 1;
+ constexpr float_t kUpperBound =
+ static_cast<float_t>(std::numeric_limits<int_t>::max()) + 1;
+ constexpr bool kLowerBoundIsMin =
+ static_cast<biggest_int_t>(kLowerBound) ==
+ static_cast<biggest_int_t>(std::numeric_limits<int_t>::min());
+ constexpr bool kUpperBoundIsMax =
+ static_cast<biggest_int_t>(kUpperBound) ==
+ static_cast<biggest_int_t>(std::numeric_limits<int_t>::max());
+ return (kLowerBoundIsMin ? (kLowerBound <= v) : (kLowerBound < v)) &&
+ (kUpperBoundIsMax ? (v <= kUpperBound) : (v < kUpperBound));
+}
+
#endif // V8_BASE_MACROS_H_
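As a rough illustration of what the new is_inbounds<> check accepts, here is a standalone sketch; it drops the exact-bound adjustment (kLowerBoundIsMin / kUpperBoundIsMax) that the real template performs, which does not change the outcome for the int32_t/double case shown:

    #include <cstdint>
    #include <limits>

    // Simplified sketch: truncation keeps any value strictly inside (min - 1, max + 1).
    template <typename int_t, typename float_t>
    bool is_inbounds_sketch(float_t v) {
      const float_t lower = static_cast<float_t>(std::numeric_limits<int_t>::min()) - 1;
      const float_t upper = static_cast<float_t>(std::numeric_limits<int_t>::max()) + 1;
      return lower < v && v < upper;
    }

    int main() {
      bool a = is_inbounds_sketch<int32_t, double>(2147483647.4);  // true: truncates to INT32_MAX
      bool b = is_inbounds_sketch<int32_t, double>(2147483648.0);  // false: would overflow
      return (a && !b) ? 0 : 1;
    }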
diff --git a/deps/v8/src/base/once.cc b/deps/v8/src/base/once.cc
index 818a9f2e84..3e5e21925d 100644
--- a/deps/v8/src/base/once.cc
+++ b/deps/v8/src/base/once.cc
@@ -15,7 +15,7 @@
namespace v8 {
namespace base {
-void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) {
+void CallOnceImpl(OnceType* once, std::function<void()> init_func) {
AtomicWord state = Acquire_Load(once);
// Fast path. The provided function was already executed.
if (state == ONCE_STATE_DONE) {
@@ -34,7 +34,7 @@ void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) {
if (state == ONCE_STATE_UNINITIALIZED) {
// We are the first thread to call this function, so we have to call the
// function.
- init_func(arg);
+ init_func();
Release_Store(once, ONCE_STATE_DONE);
} else {
// Another thread has already started executing the function. We need to
diff --git a/deps/v8/src/base/once.h b/deps/v8/src/base/once.h
index ea9c2fa88d..f355ef52ae 100644
--- a/deps/v8/src/base/once.h
+++ b/deps/v8/src/base/once.h
@@ -53,6 +53,7 @@
#define V8_BASE_ONCE_H_
#include <stddef.h>
+#include <functional>
#include "src/base/atomicops.h"
#include "src/base/base-export.h"
@@ -80,13 +81,12 @@ struct OneArgFunction {
typedef void (*type)(T);
};
-V8_BASE_EXPORT void CallOnceImpl(OnceType* once, PointerArgFunction init_func,
- void* arg);
+V8_BASE_EXPORT void CallOnceImpl(OnceType* once,
+ std::function<void()> init_func);
inline void CallOnce(OnceType* once, NoArgFunction init_func) {
if (Acquire_Load(once) != ONCE_STATE_DONE) {
- CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func),
- nullptr);
+ CallOnceImpl(once, init_func);
}
}
@@ -95,8 +95,7 @@ template <typename Arg>
inline void CallOnce(OnceType* once,
typename OneArgFunction<Arg*>::type init_func, Arg* arg) {
if (Acquire_Load(once) != ONCE_STATE_DONE) {
- CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func),
- static_cast<void*>(arg));
+ CallOnceImpl(once, [=]() { init_func(arg); });
}
}
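The one-argument CallOnce overload now binds the typed init function and its argument into a lambda, so CallOnceImpl only ever sees a std::function<void()>. A self-contained sketch of the same adapter pattern, using std::call_once in place of V8's OnceType machinery:

    #include <functional>
    #include <mutex>

    static void InitCounters(int* storage) { *storage = 42; }

    int main() {
      static std::once_flag flag;
      static int counters = 0;
      // The typed function and its argument are captured in a zero-argument
      // callable, mirroring CallOnceImpl's new std::function<void()> parameter.
      std::function<void()> init = [&]() { InitCounters(&counters); };
      std::call_once(flag, init);
      return counters == 42 ? 0 : 1;
    }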
diff --git a/deps/v8/src/base/page-allocator.cc b/deps/v8/src/base/page-allocator.cc
new file mode 100644
index 0000000000..25ee2e4721
--- /dev/null
+++ b/deps/v8/src/base/page-allocator.cc
@@ -0,0 +1,64 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/page-allocator.h"
+
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace base {
+
+#define STATIC_ASSERT_ENUM(a, b) \
+ static_assert(static_cast<int>(a) == static_cast<int>(b), \
+ "mismatching enum: " #a)
+
+STATIC_ASSERT_ENUM(PageAllocator::kNoAccess,
+ base::OS::MemoryPermission::kNoAccess);
+STATIC_ASSERT_ENUM(PageAllocator::kReadWrite,
+ base::OS::MemoryPermission::kReadWrite);
+STATIC_ASSERT_ENUM(PageAllocator::kReadWriteExecute,
+ base::OS::MemoryPermission::kReadWriteExecute);
+STATIC_ASSERT_ENUM(PageAllocator::kReadExecute,
+ base::OS::MemoryPermission::kReadExecute);
+
+#undef STATIC_ASSERT_ENUM
+
+size_t PageAllocator::AllocatePageSize() {
+ return base::OS::AllocatePageSize();
+}
+
+size_t PageAllocator::CommitPageSize() { return base::OS::CommitPageSize(); }
+
+void PageAllocator::SetRandomMmapSeed(int64_t seed) {
+ base::OS::SetRandomMmapSeed(seed);
+}
+
+void* PageAllocator::GetRandomMmapAddr() {
+ return base::OS::GetRandomMmapAddr();
+}
+
+void* PageAllocator::AllocatePages(void* address, size_t size, size_t alignment,
+ PageAllocator::Permission access) {
+ return base::OS::Allocate(address, size, alignment,
+ static_cast<base::OS::MemoryPermission>(access));
+}
+
+bool PageAllocator::FreePages(void* address, size_t size) {
+ return base::OS::Free(address, size);
+}
+
+bool PageAllocator::ReleasePages(void* address, size_t size, size_t new_size) {
+ DCHECK_LT(new_size, size);
+ return base::OS::Release(reinterpret_cast<uint8_t*>(address) + new_size,
+ size - new_size);
+}
+
+bool PageAllocator::SetPermissions(void* address, size_t size,
+ PageAllocator::Permission access) {
+ return base::OS::SetPermissions(
+ address, size, static_cast<base::OS::MemoryPermission>(access));
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/page-allocator.h b/deps/v8/src/base/page-allocator.h
new file mode 100644
index 0000000000..ff817cdba2
--- /dev/null
+++ b/deps/v8/src/base/page-allocator.h
@@ -0,0 +1,41 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PAGE_ALLOCATOR_H_
+#define V8_BASE_PAGE_ALLOCATOR_H_
+
+#include "include/v8-platform.h"
+#include "src/base/base-export.h"
+#include "src/base/compiler-specific.h"
+
+namespace v8 {
+namespace base {
+
+class V8_BASE_EXPORT PageAllocator
+ : public NON_EXPORTED_BASE(::v8::PageAllocator) {
+ public:
+ virtual ~PageAllocator() = default;
+
+ size_t AllocatePageSize() override;
+
+ size_t CommitPageSize() override;
+
+ void SetRandomMmapSeed(int64_t seed) override;
+
+ void* GetRandomMmapAddr() override;
+
+ void* AllocatePages(void* address, size_t size, size_t alignment,
+ PageAllocator::Permission access) override;
+
+ bool FreePages(void* address, size_t size) override;
+
+ bool ReleasePages(void* address, size_t size, size_t new_size) override;
+
+ bool SetPermissions(void* address, size_t size,
+ PageAllocator::Permission access) override;
+};
+
+} // namespace base
+} // namespace v8
+#endif // V8_BASE_PAGE_ALLOCATOR_H_
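A hypothetical caller of the new base::PageAllocator would exercise only the methods declared above. The sketch below assumes the V8 build environment and is not part of the patch:

    #include "src/base/page-allocator.h"

    // Reserve one page at a randomized hint, commit it read-write, then free it.
    void PageAllocatorSketch() {
      v8::base::PageAllocator allocator;
      const size_t page = allocator.AllocatePageSize();
      // Reserve an initially inaccessible page at a randomized hint address.
      void* region = allocator.AllocatePages(allocator.GetRandomMmapAddr(), page,
                                             page, v8::PageAllocator::kNoAccess);
      if (region == nullptr) return;
      // Commit it read-write before use, then release the whole reservation.
      if (allocator.SetPermissions(region, page, v8::PageAllocator::kReadWrite)) {
        // ... use the memory ...
      }
      allocator.FreePages(region, page);
    }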
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index 83a8a23c48..38a7070e85 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -124,12 +124,11 @@ bool OS::HasLazyCommits() {
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
- CHECK(false); // TODO(scottmg): Port, https://crbug.com/731217.
- return std::vector<SharedLibraryAddress>();
+ UNREACHABLE(); // TODO(scottmg): Port, https://crbug.com/731217.
}
void OS::SignalCodeMovingGC() {
- CHECK(false); // TODO(scottmg): Port, https://crbug.com/731217.
+ UNREACHABLE(); // TODO(scottmg): Port, https://crbug.com/731217.
}
} // namespace base
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index b873197d3b..5edbd7648b 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -89,6 +89,7 @@ const char* g_gc_fake_mmap = nullptr;
static LazyInstance<RandomNumberGenerator>::type
platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
+static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER;
#if !V8_OS_FUCHSIA
#if V8_OS_MACOSX
@@ -130,11 +131,9 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access) {
}
void* Allocate(void* address, size_t size, OS::MemoryPermission access) {
- const size_t actual_size = RoundUp(size, OS::AllocatePageSize());
int prot = GetProtectionFromMemoryPermission(access);
int flags = GetFlagsForMemoryPermission(access);
- void* result =
- mmap(address, actual_size, prot, flags, kMmapFd, kMmapFdOffset);
+ void* result = mmap(address, size, prot, flags, kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return nullptr;
return result;
}
@@ -167,11 +166,7 @@ int ReclaimInaccessibleMemory(void* address, size_t size) {
} // namespace
-void OS::Initialize(int64_t random_seed, bool hard_abort,
- const char* const gc_fake_mmap) {
- if (random_seed) {
- platform_random_number_generator.Pointer()->SetSeed(random_seed);
- }
+void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
g_hard_abort = hard_abort;
g_gc_fake_mmap = gc_fake_mmap;
}
@@ -207,45 +202,60 @@ size_t OS::CommitPageSize() {
}
// static
+void OS::SetRandomMmapSeed(int64_t seed) {
+ if (seed) {
+ LockGuard<Mutex> guard(rng_mutex.Pointer());
+ platform_random_number_generator.Pointer()->SetSeed(seed);
+ }
+}
+
+// static
void* OS::GetRandomMmapAddr() {
-#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
- defined(THREAD_SANITIZER)
- // Dynamic tools do not support custom mmap addresses.
- return nullptr;
-#endif
uintptr_t raw_addr;
- platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
- sizeof(raw_addr));
+ {
+ LockGuard<Mutex> guard(rng_mutex.Pointer());
+ platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
+ sizeof(raw_addr));
+ }
+#if defined(V8_USE_ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
+ defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER)
+ // If random hint addresses interfere with address ranges hard coded in
+ // sanitizers, bad things happen. This address range is copied from TSAN
+ // source but works with all tools.
+ // See crbug.com/539863.
+ raw_addr &= 0x007fffff0000ULL;
+ raw_addr += 0x7e8000000000ULL;
+#else
#if V8_TARGET_ARCH_X64
// Currently available CPUs have 48 bits of virtual addressing. Truncate
// the hint address to 46 bits to give the kernel a fighting chance of
// fulfilling our placement request.
- raw_addr &= V8_UINT64_C(0x3ffffffff000);
+ raw_addr &= uint64_t{0x3FFFFFFFF000};
#elif V8_TARGET_ARCH_PPC64
#if V8_OS_AIX
// AIX: 64 bits of virtual addressing, but we limit address range to:
// a) minimize Segment Lookaside Buffer (SLB) misses and
- raw_addr &= V8_UINT64_C(0x3ffff000);
+ raw_addr &= uint64_t{0x3FFFF000};
// Use extra address space to isolate the mmap regions.
- raw_addr += V8_UINT64_C(0x400000000000);
+ raw_addr += uint64_t{0x400000000000};
#elif V8_TARGET_BIG_ENDIAN
// Big-endian Linux: 44 bits of virtual addressing.
- raw_addr &= V8_UINT64_C(0x03fffffff000);
+ raw_addr &= uint64_t{0x03FFFFFFF000};
#else
// Little-endian Linux: 48 bits of virtual addressing.
- raw_addr &= V8_UINT64_C(0x3ffffffff000);
+ raw_addr &= uint64_t{0x3FFFFFFFF000};
#endif
#elif V8_TARGET_ARCH_S390X
// Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
// of virtual addressing. Truncate to 40 bits to allow kernel chance to
// fulfill request.
- raw_addr &= V8_UINT64_C(0xfffffff000);
+ raw_addr &= uint64_t{0xFFFFFFF000};
#elif V8_TARGET_ARCH_S390
// 31 bits of virtual addressing. Truncate to 29 bits to allow kernel chance
// to fulfill request.
- raw_addr &= 0x1ffff000;
+ raw_addr &= 0x1FFFF000;
#else
- raw_addr &= 0x3ffff000;
+ raw_addr &= 0x3FFFF000;
#ifdef __sun
// For our Solaris/illumos mmap hint, we pick a random address in the bottom
@@ -269,6 +279,7 @@ void* OS::GetRandomMmapAddr() {
raw_addr += 0x20000000;
#endif
#endif
+#endif
return reinterpret_cast<void*>(raw_addr);
}
@@ -283,6 +294,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
address = AlignedAddress(address, alignment);
// Add the maximum misalignment so we are guaranteed an aligned base address.
size_t request_size = size + (alignment - page_size);
+ request_size = RoundUp(request_size, OS::AllocatePageSize());
void* result = base::Allocate(address, request_size, access);
if (result == nullptr) return nullptr;
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index e026d7edae..22580cc407 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -674,8 +674,15 @@ void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
#undef _TRUNCATE
#undef STRUNCATE
-// The allocation alignment is the guaranteed alignment for
-// VirtualAlloc'ed blocks of memory.
+static LazyInstance<RandomNumberGenerator>::type
+ platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
+static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER;
+
+void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
+ g_hard_abort = hard_abort;
+}
+
+// static
size_t OS::AllocatePageSize() {
static size_t allocate_alignment = 0;
if (allocate_alignment == 0) {
@@ -686,6 +693,7 @@ size_t OS::AllocatePageSize() {
return allocate_alignment;
}
+// static
size_t OS::CommitPageSize() {
static size_t page_size = 0;
if (page_size == 0) {
@@ -697,17 +705,15 @@ size_t OS::CommitPageSize() {
return page_size;
}
-static LazyInstance<RandomNumberGenerator>::type
- platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
-
-void OS::Initialize(int64_t random_seed, bool hard_abort,
- const char* const gc_fake_mmap) {
- if (random_seed) {
- platform_random_number_generator.Pointer()->SetSeed(random_seed);
+// static
+void OS::SetRandomMmapSeed(int64_t seed) {
+ if (seed) {
+ LockGuard<Mutex> guard(rng_mutex.Pointer());
+ platform_random_number_generator.Pointer()->SetSeed(seed);
}
- g_hard_abort = hard_abort;
}
+// static
void* OS::GetRandomMmapAddr() {
// The address range used to randomize RWX allocations in OS::Allocate
// Try not to map pages into the default range that windows loads DLLs
@@ -722,8 +728,11 @@ void* OS::GetRandomMmapAddr() {
static const uintptr_t kAllocationRandomAddressMax = 0x3FFF0000;
#endif
uintptr_t address;
- platform_random_number_generator.Pointer()->NextBytes(&address,
- sizeof(address));
+ {
+ LockGuard<Mutex> guard(rng_mutex.Pointer());
+ platform_random_number_generator.Pointer()->NextBytes(&address,
+ sizeof(address));
+ }
address <<= kPageSizeBits;
address += kAllocationRandomAddressMin;
address &= kAllocationRandomAddressMax;
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index dd454ecd43..8a4545c607 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -36,6 +36,7 @@
#endif
namespace v8 {
+
namespace base {
// ----------------------------------------------------------------------------
@@ -93,10 +94,9 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
#endif // V8_NO_FAST_TLS
-
+class PageAllocator;
class TimezoneCache;
-
// ----------------------------------------------------------------------------
// OS
//
@@ -107,11 +107,9 @@ class TimezoneCache;
class V8_BASE_EXPORT OS {
public:
// Initialize the OS class.
- // - random_seed: Used for the GetRandomMmapAddress() if non-zero.
// - hard_abort: If true, OS::Abort() will crash instead of aborting.
// - gc_fake_mmap: Name of the file for fake gc mmap used in ll_prof.
- static void Initialize(int64_t random_seed, bool hard_abort,
- const char* const gc_fake_mmap);
+ static void Initialize(bool hard_abort, const char* const gc_fake_mmap);
// Returns the accumulated user time for thread. This routine
// can be used for profiling. The implementation should
@@ -157,6 +155,8 @@ class V8_BASE_EXPORT OS {
static PRINTF_FORMAT(1, 2) void PrintError(const char* format, ...);
static PRINTF_FORMAT(1, 0) void VPrintError(const char* format, va_list args);
+ // Memory permissions. These should be kept in sync with the ones in
+ // v8::PageAllocator.
enum class MemoryPermission {
kNoAccess,
kReadWrite,
@@ -165,40 +165,6 @@ class V8_BASE_EXPORT OS {
kReadExecute
};
- // Gets the page granularity for Allocate. Addresses returned by Allocate are
- // aligned to this size.
- static size_t AllocatePageSize();
-
- // Gets the granularity at which the permissions and commit calls can be made.
- static size_t CommitPageSize();
-
- // Generate a random address to be used for hinting allocation calls.
- static void* GetRandomMmapAddr();
-
- // Allocates memory. Permissions are set according to the access argument.
- // The address parameter is a hint. The size and alignment parameters must be
- // multiples of AllocatePageSize(). Returns the address of the allocated
- // memory, with the specified size and alignment, or nullptr on failure.
- V8_WARN_UNUSED_RESULT static void* Allocate(void* address, size_t size,
- size_t alignment,
- MemoryPermission access);
-
- // Frees memory allocated by a call to Allocate. address and size must be
- // multiples of AllocatePageSize(). Returns true on success, otherwise false.
- V8_WARN_UNUSED_RESULT static bool Free(void* address, const size_t size);
-
- // Releases memory that is no longer needed. The range specified by address
- // and size must be part of an allocated memory region, and must be multiples
- // of CommitPageSize(). Released memory is left in an undefined state, so it
- // should not be accessed. Returns true on success, otherwise false.
- V8_WARN_UNUSED_RESULT static bool Release(void* address, size_t size);
-
- // Sets permissions according to the access argument. address and size must be
- // multiples of CommitPageSize(). Setting permission to kNoAccess may cause
- // the memory contents to be lost. Returns true on success, otherwise false.
- V8_WARN_UNUSED_RESULT static bool SetPermissions(void* address, size_t size,
- MemoryPermission access);
-
static bool HasLazyCommits();
// Sleep for a specified time interval.
@@ -280,6 +246,30 @@ class V8_BASE_EXPORT OS {
static int GetCurrentThreadId();
private:
+ // These classes use the private memory management API below.
+ friend class MemoryMappedFile;
+ friend class PosixMemoryMappedFile;
+ friend class v8::base::PageAllocator;
+
+ static size_t AllocatePageSize();
+
+ static size_t CommitPageSize();
+
+ static void SetRandomMmapSeed(int64_t seed);
+
+ static void* GetRandomMmapAddr();
+
+ V8_WARN_UNUSED_RESULT static void* Allocate(void* address, size_t size,
+ size_t alignment,
+ MemoryPermission access);
+
+ V8_WARN_UNUSED_RESULT static bool Free(void* address, const size_t size);
+
+ V8_WARN_UNUSED_RESULT static bool Release(void* address, size_t size);
+
+ V8_WARN_UNUSED_RESULT static bool SetPermissions(void* address, size_t size,
+ MemoryPermission access);
+
static const int msPerSecond = 1000;
#if V8_OS_POSIX
diff --git a/deps/v8/src/base/platform/semaphore.cc b/deps/v8/src/base/platform/semaphore.cc
index 9a7ef7a8f4..5950664523 100644
--- a/deps/v8/src/base/platform/semaphore.cc
+++ b/deps/v8/src/base/platform/semaphore.cc
@@ -136,7 +136,7 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
Semaphore::Semaphore(int count) {
DCHECK_GE(count, 0);
- native_handle_ = ::CreateSemaphoreA(nullptr, count, 0x7fffffff, nullptr);
+ native_handle_ = ::CreateSemaphoreA(nullptr, count, 0x7FFFFFFF, nullptr);
DCHECK_NOT_NULL(native_handle_);
}
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index 3529d55875..1fcd7aecce 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -298,8 +298,7 @@ Time Time::NowFromSystemTime() {
// Time between windows epoch and standard epoch.
-static const int64_t kTimeToEpochInMicroseconds = V8_INT64_C(11644473600000000);
-
+static const int64_t kTimeToEpochInMicroseconds = int64_t{11644473600000000};
Time Time::FromFiletime(FILETIME ft) {
if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
diff --git a/deps/v8/src/base/safe_conversions.h b/deps/v8/src/base/safe_conversions.h
index c16fa36682..f63f1ad99e 100644
--- a/deps/v8/src/base/safe_conversions.h
+++ b/deps/v8/src/base/safe_conversions.h
@@ -53,8 +53,7 @@ inline Dst saturated_cast(Src value) {
// Should fail only on attempting to assign NaN to a saturated integer.
case internal::RANGE_INVALID:
- CHECK(false);
- return std::numeric_limits<Dst>::max();
+ UNREACHABLE();
}
UNREACHABLE();
diff --git a/deps/v8/src/base/utils/random-number-generator.cc b/deps/v8/src/base/utils/random-number-generator.cc
index 86c3694feb..afe5a1f098 100644
--- a/deps/v8/src/base/utils/random-number-generator.cc
+++ b/deps/v8/src/base/utils/random-number-generator.cc
@@ -213,9 +213,9 @@ void RandomNumberGenerator::SetSeed(int64_t seed) {
uint64_t RandomNumberGenerator::MurmurHash3(uint64_t h) {
h ^= h >> 33;
- h *= V8_UINT64_C(0xFF51AFD7ED558CCD);
+ h *= uint64_t{0xFF51AFD7ED558CCD};
h ^= h >> 33;
- h *= V8_UINT64_C(0xC4CEB9FE1A85EC53);
+ h *= uint64_t{0xC4CEB9FE1A85EC53};
h ^= h >> 33;
return h;
}
diff --git a/deps/v8/src/base/utils/random-number-generator.h b/deps/v8/src/base/utils/random-number-generator.h
index 285c5972e0..321ce861fb 100644
--- a/deps/v8/src/base/utils/random-number-generator.h
+++ b/deps/v8/src/base/utils/random-number-generator.h
@@ -113,8 +113,8 @@ class V8_BASE_EXPORT RandomNumberGenerator final {
// Static and exposed for external use.
static inline double ToDouble(uint64_t state0, uint64_t state1) {
// Exponent for double values for [1.0 .. 2.0)
- static const uint64_t kExponentBits = V8_UINT64_C(0x3FF0000000000000);
- static const uint64_t kMantissaMask = V8_UINT64_C(0x000FFFFFFFFFFFFF);
+ static const uint64_t kExponentBits = uint64_t{0x3FF0000000000000};
+ static const uint64_t kMantissaMask = uint64_t{0x000FFFFFFFFFFFFF};
uint64_t random = ((state0 + state1) & kMantissaMask) | kExponentBits;
return bit_cast<double>(random) - 1;
}
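ToDouble relies on a standard IEEE-754 trick: forcing the exponent field to 0x3FF yields a double in [1.0, 2.0), so subtracting 1.0 maps 52 random mantissa bits onto [0.0, 1.0). A self-contained sketch of the same idea, using memcpy as a portable bit_cast:

    #include <cstdint>
    #include <cstring>

    double ToUnitInterval(uint64_t random_bits) {
      const uint64_t kExponentBits = uint64_t{0x3FF0000000000000};
      const uint64_t kMantissaMask = uint64_t{0x000FFFFFFFFFFFFF};
      const uint64_t bits = (random_bits & kMantissaMask) | kExponentBits;
      double result;
      std::memcpy(&result, &bits, sizeof result);  // reinterpret the bit pattern
      return result - 1.0;                         // [1.0, 2.0) -> [0.0, 1.0)
    }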
diff --git a/deps/v8/src/bignum.cc b/deps/v8/src/bignum.cc
index 087ec45323..a0a398b7aa 100644
--- a/deps/v8/src/bignum.cc
+++ b/deps/v8/src/bignum.cc
@@ -278,7 +278,7 @@ void Bignum::MultiplyByUInt64(uint64_t factor) {
void Bignum::MultiplyByPowerOfTen(int exponent) {
- const uint64_t kFive27 = V8_2PART_UINT64_C(0x6765c793, fa10079d);
+ const uint64_t kFive27 = V8_2PART_UINT64_C(0x6765C793, fa10079d);
const uint16_t kFive1 = 5;
const uint16_t kFive2 = kFive1 * 5;
const uint16_t kFive3 = kFive2 * 5;
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 695200172d..399b705f00 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -214,10 +214,12 @@ class Genesis BASE_EMBEDDED {
HARMONY_SHIPPING(DECLARE_FEATURE_INITIALIZATION)
#undef DECLARE_FEATURE_INITIALIZATION
+ enum ArrayBufferKind {
+ ARRAY_BUFFER,
+ SHARED_ARRAY_BUFFER,
+ };
Handle<JSFunction> CreateArrayBuffer(Handle<String> name,
- Builtins::Name call_byteLength,
- BuiltinFunctionId byteLength_id,
- Builtins::Name call_slice);
+ ArrayBufferKind array_buffer_kind);
Handle<JSFunction> InstallInternalArray(Handle<JSObject> target,
const char* name,
ElementsKind elements_kind);
@@ -780,7 +782,7 @@ void Genesis::CreateObjectFunction(Handle<JSFunction> empty_function) {
"EmptyObjectPrototype");
map->set_is_prototype_map(true);
// Ban re-setting Object.prototype.__proto__ to prevent Proxy security bug
- map->set_immutable_proto(true);
+ map->set_is_immutable_proto(true);
object_function_prototype->set_map(*map);
// Complete setting up empty function.
@@ -1073,12 +1075,12 @@ void Genesis::CreateJSProxyMaps() {
// constructable proxies.
Handle<Map> proxy_map = factory()->NewMap(JS_PROXY_TYPE, JSProxy::kSize,
TERMINAL_FAST_ELEMENTS_KIND);
- proxy_map->set_dictionary_map(true);
+ proxy_map->set_is_dictionary_map(true);
proxy_map->set_may_have_interesting_symbols(true);
native_context()->set_proxy_map(*proxy_map);
Handle<Map> proxy_callable_map = Map::Copy(proxy_map, "callable Proxy");
- proxy_callable_map->set_is_callable();
+ proxy_callable_map->set_is_callable(true);
native_context()->set_proxy_callable_map(*proxy_callable_map);
proxy_callable_map->SetConstructor(native_context()->function_function());
@@ -1086,6 +1088,31 @@ void Genesis::CreateJSProxyMaps() {
Map::Copy(proxy_callable_map, "constructor Proxy");
proxy_constructor_map->set_is_constructor(true);
native_context()->set_proxy_constructor_map(*proxy_constructor_map);
+
+ {
+ Handle<Map> map =
+ factory()->NewMap(JS_OBJECT_TYPE, JSProxyRevocableResult::kSize,
+ TERMINAL_FAST_ELEMENTS_KIND, 2);
+ Map::EnsureDescriptorSlack(map, 2);
+
+ { // proxy
+ Descriptor d = Descriptor::DataField(factory()->proxy_string(),
+ JSProxyRevocableResult::kProxyIndex,
+ NONE, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+ { // revoke
+ Descriptor d = Descriptor::DataField(factory()->revoke_string(),
+ JSProxyRevocableResult::kRevokeIndex,
+ NONE, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+
+ Map::SetPrototype(map, isolate()->initial_object_prototype());
+ map->SetConstructor(native_context()->object_function());
+
+ native_context()->set_proxy_revocable_result_map(*map);
+ }
}
namespace {
@@ -1227,7 +1254,7 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
}
js_global_object_function->initial_map()->set_is_prototype_map(true);
- js_global_object_function->initial_map()->set_dictionary_map(true);
+ js_global_object_function->initial_map()->set_is_dictionary_map(true);
js_global_object_function->initial_map()->set_may_have_interesting_symbols(
true);
Handle<JSGlobalObject> global_object =
@@ -1481,9 +1508,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
object_function, "keys", Builtins::kObjectKeys, 1, true);
native_context()->set_object_keys(*object_keys);
SimpleInstallFunction(object_function, factory->entries_string(),
- Builtins::kObjectEntries, 1, false);
+ Builtins::kObjectEntries, 1, true);
SimpleInstallFunction(object_function, factory->values_string(),
- Builtins::kObjectValues, 1, false);
+ Builtins::kObjectValues, 1, true);
SimpleInstallFunction(isolate->initial_object_prototype(),
"__defineGetter__", Builtins::kObjectDefineGetter, 2,
@@ -1517,6 +1544,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->proto_string(),
Builtins::kObjectPrototypeGetProto,
Builtins::kObjectPrototypeSetProto, DONT_ENUM);
+
+ SimpleInstallFunction(isolate->initial_object_prototype(), "toLocaleString",
+ Builtins::kObjectPrototypeToLocaleString, 0, true);
}
Handle<JSObject> global(native_context()->global_object());
@@ -1679,12 +1709,18 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
DONT_ENUM);
SimpleInstallFunction(proto, "concat", Builtins::kArrayConcat, 1, false);
- SimpleInstallFunction(proto, "pop", Builtins::kFastArrayPop, 0, false);
- SimpleInstallFunction(proto, "push", Builtins::kFastArrayPush, 1, false);
- SimpleInstallFunction(proto, "shift", Builtins::kFastArrayShift, 0, false);
+ SimpleInstallFunction(proto, "find", Builtins::kArrayPrototypeFind, 1,
+ false);
+ SimpleInstallFunction(proto, "findIndex",
+ Builtins::kArrayPrototypeFindIndex, 1, false);
+ SimpleInstallFunction(proto, "pop", Builtins::kArrayPrototypePop, 0, false);
+ SimpleInstallFunction(proto, "push", Builtins::kArrayPrototypePush, 1,
+ false);
+ SimpleInstallFunction(proto, "shift", Builtins::kArrayPrototypeShift, 0,
+ false);
SimpleInstallFunction(proto, "unshift", Builtins::kArrayUnshift, 1, false);
if (FLAG_enable_experimental_builtins) {
- SimpleInstallFunction(proto, "slice", Builtins::kFastArraySlice, 2,
+ SimpleInstallFunction(proto, "slice", Builtins::kArrayPrototypeSlice, 2,
false);
} else {
SimpleInstallFunction(proto, "slice", Builtins::kArraySlice, 2, false);
@@ -1798,6 +1834,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> number_fun = InstallFunction(
global, "Number", JS_VALUE_TYPE, JSValue::kSize, 0,
isolate->initial_object_prototype(), Builtins::kNumberConstructor);
+ number_fun->shared()->set_builtin_function_id(kNumberConstructor);
number_fun->shared()->DontAdaptArguments();
number_fun->shared()->SetConstructStub(
*BUILTIN_CODE(isolate, NumberConstructor_ConstructStub));
@@ -1942,6 +1979,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> string_fun = InstallFunction(
global, "String", JS_VALUE_TYPE, JSValue::kSize, 0,
isolate->initial_object_prototype(), Builtins::kStringConstructor);
+ string_fun->shared()->set_builtin_function_id(kStringConstructor);
string_fun->shared()->SetConstructStub(
*BUILTIN_CODE(isolate, StringConstructor_ConstructStub));
string_fun->shared()->DontAdaptArguments();
@@ -2123,6 +2161,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> symbol_fun = InstallFunction(
global, "Symbol", JS_VALUE_TYPE, JSValue::kSize, 0,
factory->the_hole_value(), Builtins::kSymbolConstructor);
+ symbol_fun->shared()->set_builtin_function_id(kSymbolConstructor);
symbol_fun->shared()->SetConstructStub(
*BUILTIN_CODE(isolate, SymbolConstructor_ConstructStub));
symbol_fun->shared()->set_length(0);
@@ -2135,6 +2174,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
false);
// Install well-known symbols.
+ InstallConstant(isolate, symbol_fun, "asyncIterator",
+ factory->async_iterator_symbol());
InstallConstant(isolate, symbol_fun, "hasInstance",
factory->has_instance_symbol());
InstallConstant(isolate, symbol_fun, "isConcatSpreadable",
@@ -2362,11 +2403,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> promise_then =
SimpleInstallFunction(prototype, isolate->factory()->then_string(),
- Builtins::kPromiseThen, 2, true);
+ Builtins::kPromisePrototypeThen, 2, true);
native_context()->set_promise_then(*promise_then);
Handle<JSFunction> promise_catch = SimpleInstallFunction(
- prototype, "catch", Builtins::kPromiseCatch, 1, true);
+ prototype, "catch", Builtins::kPromisePrototypeCatch, 1, true);
native_context()->set_promise_catch(*promise_catch);
// Force the Promise constructor to fast properties, so that we can use the
@@ -2410,8 +2451,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // Internal: PromiseHandle
- Handle<JSFunction> function = SimpleCreateFunction(
- isolate, factory->empty_string(), Builtins::kPromiseHandle, 5, false);
+ Handle<JSFunction> function =
+ SimpleCreateFunction(isolate, factory->empty_string(),
+ Builtins::kPromiseHandleJS, 5, false);
native_context()->set_promise_handle(*function);
}
@@ -2921,10 +2963,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- A r r a y B u f f e r
Handle<String> name = factory->InternalizeUtf8String("ArrayBuffer");
- Handle<JSFunction> array_buffer_fun =
- CreateArrayBuffer(name, Builtins::kArrayBufferPrototypeGetByteLength,
- BuiltinFunctionId::kArrayBufferByteLength,
- Builtins::kArrayBufferPrototypeSlice);
+ Handle<JSFunction> array_buffer_fun = CreateArrayBuffer(name, ARRAY_BUFFER);
JSObject::AddProperty(global, name, array_buffer_fun, DONT_ENUM);
InstallWithIntrinsicDefaultProto(isolate, array_buffer_fun,
Context::ARRAY_BUFFER_FUN_INDEX);
@@ -2940,10 +2979,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- S h a r e d A r r a y B u f f e r
Handle<String> name = factory->InternalizeUtf8String("SharedArrayBuffer");
- Handle<JSFunction> shared_array_buffer_fun = CreateArrayBuffer(
- name, Builtins::kSharedArrayBufferPrototypeGetByteLength,
- BuiltinFunctionId::kSharedArrayBufferByteLength,
- Builtins::kSharedArrayBufferPrototypeSlice);
+ Handle<JSFunction> shared_array_buffer_fun =
+ CreateArrayBuffer(name, SHARED_ARRAY_BUFFER);
InstallWithIntrinsicDefaultProto(isolate, shared_array_buffer_fun,
Context::SHARED_ARRAY_BUFFER_FUN_INDEX);
InstallSpeciesGetter(shared_array_buffer_fun);
@@ -3415,6 +3452,15 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_proxy_function(*proxy_function);
InstallFunction(global, name, proxy_function, factory->Object_string());
+
+ SimpleInstallFunction(proxy_function, "revocable",
+ Builtins::kProxyRevocable, 2, true);
+
+ { // Internal: ProxyRevoke
+ Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
+ isolate, Builtins::kProxyRevoke, factory->empty_string(), 0);
+ native_context()->set_proxy_revoke_shared_fun(*info);
+ }
}
{ // -- R e f l e c t
@@ -3467,7 +3513,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->NewMap(JS_BOUND_FUNCTION_TYPE, JSBoundFunction::kSize,
TERMINAL_FAST_ELEMENTS_KIND, 0);
map->SetConstructor(native_context()->object_function());
- map->set_is_callable();
+ map->set_is_callable(true);
Map::SetPrototype(map, empty_function);
PropertyAttributes roc_attribs =
@@ -4301,10 +4347,13 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_tostring)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_public_fields)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_fields)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_static_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_meta)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrict_constructor_return)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_optional_catch_binding)
void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
const char* name, Handle<Symbol> value) {
@@ -4362,20 +4411,13 @@ void Genesis::InitializeGlobal_harmony_array_prototype_values() {
NONE);
}
-void Genesis::InitializeGlobal_harmony_async_iteration() {
- if (!FLAG_harmony_async_iteration) return;
- Handle<JSFunction> symbol_fun(native_context()->symbol_function());
- InstallConstant(isolate(), symbol_fun, "asyncIterator",
- factory()->async_iterator_symbol());
-}
-
void Genesis::InitializeGlobal_harmony_promise_finally() {
if (!FLAG_harmony_promise_finally) return;
Handle<JSFunction> constructor(native_context()->promise_function());
Handle<JSObject> prototype(JSObject::cast(constructor->instance_prototype()));
- SimpleInstallFunction(prototype, "finally", Builtins::kPromiseFinally, 1,
- true, DONT_ENUM);
+ SimpleInstallFunction(prototype, "finally",
+ Builtins::kPromisePrototypeFinally, 1, true, DONT_ENUM);
// The promise prototype map has changed because we added a property
// to prototype, so we update the saved map.
@@ -4421,6 +4463,7 @@ void Genesis::InitializeGlobal_harmony_bigint() {
Handle<JSFunction> bigint_fun =
InstallFunction(global, "BigInt", JS_VALUE_TYPE, JSValue::kSize, 0,
factory->the_hole_value(), Builtins::kBigIntConstructor);
+ bigint_fun->shared()->set_builtin_function_id(kBigIntConstructor);
bigint_fun->shared()->DontAdaptArguments();
bigint_fun->shared()->SetConstructStub(
*BUILTIN_CODE(isolate(), BigIntConstructor_ConstructStub));
@@ -4492,10 +4535,8 @@ void Genesis::InitializeGlobal_harmony_plural_rules() {
#endif // V8_INTL_SUPPORT
-Handle<JSFunction> Genesis::CreateArrayBuffer(Handle<String> name,
- Builtins::Name call_byteLength,
- BuiltinFunctionId byteLength_id,
- Builtins::Name call_slice) {
+Handle<JSFunction> Genesis::CreateArrayBuffer(
+ Handle<String> name, ArrayBufferKind array_buffer_kind) {
// Create the %ArrayBufferPrototype%
// Setup the {prototype} with the given {name} for @@toStringTag.
Handle<JSObject> prototype =
@@ -4519,15 +4560,33 @@ Handle<JSFunction> Genesis::CreateArrayBuffer(Handle<String> name,
JSObject::AddProperty(prototype, factory()->constructor_string(),
array_buffer_fun, DONT_ENUM);
- SimpleInstallFunction(array_buffer_fun, factory()->isView_string(),
- Builtins::kArrayBufferIsView, 1, true, DONT_ENUM,
- kArrayBufferIsView);
+ switch (array_buffer_kind) {
+ case ARRAY_BUFFER:
+ SimpleInstallFunction(array_buffer_fun, factory()->isView_string(),
+ Builtins::kArrayBufferIsView, 1, true, DONT_ENUM,
+ kArrayBufferIsView);
+
+ // Install the "byteLength" getter on the {prototype}.
+ SimpleInstallGetter(prototype, factory()->byte_length_string(),
+ Builtins::kArrayBufferPrototypeGetByteLength, false,
+ BuiltinFunctionId::kArrayBufferByteLength);
+
+ SimpleInstallFunction(prototype, "slice",
+ Builtins::kArrayBufferPrototypeSlice, 2, true);
+ break;
- // Install the "byteLength" getter on the {prototype}.
- SimpleInstallGetter(prototype, factory()->byte_length_string(),
- call_byteLength, false, byteLength_id);
+ case SHARED_ARRAY_BUFFER:
+ // Install the "byteLength" getter on the {prototype}.
+ SimpleInstallGetter(prototype, factory()->byte_length_string(),
+ Builtins::kSharedArrayBufferPrototypeGetByteLength,
+ false,
+ BuiltinFunctionId::kSharedArrayBufferByteLength);
- SimpleInstallFunction(prototype, "slice", call_slice, 2, true);
+ SimpleInstallFunction(prototype, "slice",
+ Builtins::kSharedArrayBufferPrototypeSlice, 2,
+ true);
+ break;
+ }
return array_buffer_fun;
}
@@ -4823,9 +4882,9 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
}
// Create a constructor for RegExp results (a variant of Array that
- // predefines the two properties index and match).
+ // predefines the properties index, input, and groups).
{
- // RegExpResult initial map.
+ // JSRegExpResult initial map.
// Find global.Array.prototype to inherit from.
Handle<JSFunction> array_constructor(native_context()->array_function());
@@ -4834,16 +4893,20 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
// Add initial map.
Handle<Map> initial_map = factory()->NewMap(
- JS_ARRAY_TYPE, JSRegExpResult::kSize, TERMINAL_FAST_ELEMENTS_KIND, 2);
+ JS_ARRAY_TYPE, JSRegExpResult::kSize, TERMINAL_FAST_ELEMENTS_KIND,
+ JSRegExpResult::kInObjectPropertyCount);
initial_map->SetConstructor(*array_constructor);
// Set prototype on map.
- initial_map->set_non_instance_prototype(false);
+ initial_map->set_has_non_instance_prototype(false);
Map::SetPrototype(initial_map, array_prototype);
- // Update map with length accessor from Array and add "index" and "input".
- Map::EnsureDescriptorSlack(initial_map, 3);
+ // Update map with length accessor from Array and add "index", "input" and
+ // "groups".
+ Map::EnsureDescriptorSlack(initial_map,
+ JSRegExpResult::kInObjectPropertyCount + 1);
+ // length descriptor.
{
JSFunction* array_function = native_context()->array_function();
Handle<DescriptorArray> array_descriptors(
@@ -4857,6 +4920,8 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
array_descriptors->GetDetails(old).attributes());
initial_map->AppendDescriptor(&d);
}
+
+ // index descriptor.
{
Descriptor d = Descriptor::DataField(factory()->index_string(),
JSRegExpResult::kIndexIndex, NONE,
@@ -4864,6 +4929,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
initial_map->AppendDescriptor(&d);
}
+ // input descriptor.
{
Descriptor d = Descriptor::DataField(factory()->input_string(),
JSRegExpResult::kInputIndex, NONE,
@@ -4871,6 +4937,14 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
initial_map->AppendDescriptor(&d);
}
+ // groups descriptor.
+ {
+ Descriptor d = Descriptor::DataField(factory()->groups_string(),
+ JSRegExpResult::kGroupsIndex, NONE,
+ Representation::Tagged());
+ initial_map->AppendDescriptor(&d);
+ }
+
native_context()->set_regexp_result_map(*initial_map);
}
@@ -5482,10 +5556,6 @@ Genesis::Genesis(
ConfigureUtilsObject(context_type);
- // Check that the script context table is empty except for the 'this' binding.
- // We do not need script contexts for native scripts.
- DCHECK_EQ(1, native_context()->script_context_table()->used());
-
native_context()->ResetErrorsThrown();
result_ = native_context();
}
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 1c31009d93..2b2b9c2b34 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -116,9 +116,9 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin InternalArray functions should be maps.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(r2);
- __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
@@ -143,9 +143,9 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin Array functions should be maps.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(r2);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
__ mov(r3, r1);
@@ -283,14 +283,16 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
Label post_instantiation_deopt_entry, not_create_implicit_receiver;
// Preserve the incoming parameters on the stack.
+ __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
__ SmiTag(r0);
- __ Push(cp, r0, r1, r3);
+ __ Push(cp, r0, r1, r4, r3);
// ----------- S t a t e -------------
// -- sp[0*kPointerSize]: new target
- // -- r1 and sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments (tagged)
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- r1 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
@@ -332,9 +334,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- r3: new target
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
// Restore constructor function and argument count.
@@ -355,9 +358,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- r5: counter
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- r1 and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- r1 and sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
__ b(&entry);
@@ -375,9 +379,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- r0: constructor result
// -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -541,7 +546,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kMissingBytecodeArray);
+ __ Assert(eq, AbortReason::kMissingBytecodeArray);
}
// Resume (Ignition/TurboFan) generator object.
@@ -629,8 +634,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(cp, Operand(context_address));
__ ldr(cp, MemOperand(cp));
- __ InitializeRootRegister();
-
// Push the function and the receiver onto the stack.
__ Push(r1, r2);
@@ -777,6 +780,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ b(eq, &fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
@@ -791,7 +797,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ cmp(
optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
- __ Assert(eq, kExpectedOptimizationSentinel);
+ __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
__ jmp(&fallthrough);
}
@@ -871,7 +877,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ add(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size);
// Load the size of the current bytecode.
__ bind(&load_size);
@@ -935,10 +940,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ SmiTst(kInterpreterBytecodeArrayRegister);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r0, no_reg,
BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Reset code age.
@@ -1194,10 +1201,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ SmiTst(kInterpreterBytecodeArrayRegister);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r1, no_reg,
BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Get the target bytecode offset from the frame.
@@ -1259,7 +1268,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+ __ Assert(ne, AbortReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
@@ -1799,8 +1808,9 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ mov(r4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
fp.bit() | lr.bit());
+ __ Push(Smi::kZero); // Padding.
__ add(fp, sp,
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
}
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1809,8 +1819,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
- __ ldr(r1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize)));
+ __ ldr(r1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR);
__ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
@@ -1889,7 +1898,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ JumpIfSmi(r3, &new_target_not_constructor);
__ ldr(scratch, FieldMemOperand(r3, HeapObject::kMapOffset));
__ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsConstructor));
+ __ tst(scratch, Operand(Map::IsConstructorBit::kMask));
__ b(ne, &new_target_constructor);
__ bind(&new_target_not_constructor);
{
@@ -2178,7 +2187,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target has a [[Call]] internal method.
__ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
- __ tst(r4, Operand(1 << Map::kIsCallable));
+ __ tst(r4, Operand(Map::IsCallableBit::kMask));
__ b(eq, &non_callable);
// Check if target is a proxy and call CallProxy external builtin
@@ -2268,7 +2277,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Check if target has a [[Construct]] internal method.
__ ldrb(r2, FieldMemOperand(r4, Map::kBitFieldOffset));
- __ tst(r2, Operand(1 << Map::kIsConstructor));
+ __ tst(r2, Operand(Map::IsConstructorBit::kMask));
__ b(eq, &non_constructor);
// Only dispatch to bound functions after checking whether they are
@@ -2337,17 +2346,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r1 : message as String object
- // -- lr : return address
- // -----------------------------------
- __ Push(r1);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbortJS);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : actual number of arguments
@@ -2434,8 +2432,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
__ sub(r4, fp, Operand(r2, LSL, kPointerSizeLog2));
// Adjust for frame.
- __ sub(r4, r4, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
- 2 * kPointerSize));
+ __ sub(r4, r4,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize));
Label fill;
__ bind(&fill);
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 875f261835..dd92af89bb 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -111,9 +111,9 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin InternalArray functions should be maps.
__ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
__ Tst(x10, kSmiTagMask);
- __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
__ CompareObjectType(x10, x11, x12, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
@@ -138,9 +138,9 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin Array functions should be maps.
__ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
__ Tst(x10, kSmiTagMask);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(x10, x11, x12, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
// Run the native code for the Array function called as a normal function.
@@ -210,7 +210,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Check that FrameScope pushed the context on to the stack already.
__ Peek(x2, 0);
__ Cmp(x2, cp);
- __ Check(eq, kUnexpectedValue);
+ __ Check(eq, AbortReason::kUnexpectedValue);
}
// Push number of arguments.
@@ -315,7 +315,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// Check that FrameScope pushed the context on to the stack already.
__ Peek(x2, 0);
__ Cmp(x2, cp);
- __ Check(eq, kUnexpectedValue);
+ __ Check(eq, AbortReason::kUnexpectedValue);
}
// Preserve the incoming parameters on the stack.
@@ -348,10 +348,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- x0: receiver
- // -- Slot 3 / sp[0*kPointerSize]: new target
- // -- Slot 2 / sp[1*kPointerSize]: constructor function
- // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[3*kPointerSize]: context
+ // -- Slot 4 / sp[0*kPointerSize]: new target
+ // -- Slot 3 / sp[1*kPointerSize]: padding
+ // -- Slot 2 / sp[2*kPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -388,9 +389,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- sp[0*kPointerSize]: implicit receiver (overwrite if argc odd)
// -- sp[1*kPointerSize]: implicit receiver
// -- sp[2*kPointerSize]: implicit receiver
- // -- x1 and sp[3*kPointerSize]: constructor function
- // -- sp[4*kPointerSize]: number of arguments (tagged)
- // -- sp[5*kPointerSize]: context
+ // -- sp[3*kPointerSize]: padding
+ // -- x1 and sp[4*kPointerSize]: constructor function
+ // -- sp[5*kPointerSize]: number of arguments (tagged)
+ // -- sp[6*kPointerSize]: context
// -----------------------------------
// Round the number of arguments down to the next even number, and claim
@@ -416,14 +418,8 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ InvokeFunction(x1, x3, actual, CALL_FUNCTION);
// ----------- S t a t e -------------
- // If argc is odd:
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments
- // -- sp[3*kPointerSize]: context
- // If argc is even:
// -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: padding
// -- sp[2*kPointerSize]: constructor function
// -- sp[3*kPointerSize]: number of arguments
// -- sp[4*kPointerSize]: context
@@ -556,7 +552,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ CompareRoot(jssp, Heap::kRealStackLimitRootIndex);
+ __ CompareRoot(__ StackPointer(), Heap::kRealStackLimitRootIndex);
__ B(lo, &stack_overflow);
// Get number of arguments for generator function.
@@ -564,10 +560,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Ldr(w10,
FieldMemOperand(x10, SharedFunctionInfo::kFormalParameterCountOffset));
- // Claim slots for arguments and receiver.
- __ Add(x11, x10, 1);
+ // Claim slots for arguments and receiver (rounded up to a multiple of two).
+ __ Add(x11, x10, 2);
+ __ Bic(x11, x11, 1);
__ Claim(x11);
+ // Store padding (which might be replaced by the receiver).
+ __ Sub(x11, x11, 1);
+ __ Poke(padreg, Operand(x11, LSL, kPointerSizeLog2));
+
// Poke receiver into highest claimed slot.
__ Ldr(x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
__ Poke(x5, Operand(x10, LSL, kPointerSizeLog2));
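The Add/Bic pair above rounds the claimed slot count (arguments plus receiver) up to an even number so the arm64 stack stays 16-byte aligned, and pre-fills the possible extra slot with padreg. A minimal sketch of just that arithmetic (not V8 code):

// claimed = (argc + 2) & ~1, i.e. argc + 1 slots rounded up to even.
#include <cassert>

int ClaimedSlots(int argc) { return (argc + 2) & ~1; }

int main() {
  assert(ClaimedSlots(3) == 4);  // 3 args + receiver is already even: no padding
  assert(ClaimedSlots(4) == 6);  // 4 args + receiver is odd: one padding slot
  return 0;
}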
@@ -578,8 +579,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- x10 : argument count
// -- cp : generator context
// -- lr : return address
- // -- jssp[arg count] : generator receiver
- // -- jssp[0 .. arg count - 1] : claimed for args
+ // -- sp[arg count] : generator receiver
+ // -- sp[0 .. arg count - 1] : claimed for args
// -----------------------------------
// Push holes for arguments to generator function. Since the parser forced
@@ -603,7 +604,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kMissingBytecodeArray);
+ __ Assert(eq, AbortReason::kMissingBytecodeArray);
}
// Resume (Ignition/TurboFan) generator object.
@@ -624,10 +625,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x1);
+ __ Push(x1, padreg);
__ PushArgument(x4);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
- __ Pop(x1);
+ __ Pop(padreg, x1);
__ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
}
__ B(&stepping_prepared);
@@ -635,9 +636,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Bind(&prepare_step_in_suspended_generator);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x1);
+ __ Push(x1, padreg);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
- __ Pop(x1);
+ __ Pop(padreg, x1);
__ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
}
__ B(&stepping_prepared);
@@ -652,8 +653,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Label* stack_overflow) {
- DCHECK(masm->StackPointer().Is(jssp));
-
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
@@ -767,10 +766,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Mov(x23, x19);
__ Mov(x24, x19);
__ Mov(x25, x19);
+ __ Mov(x28, x19);
// Don't initialize the reserved registers.
// x26 : root register (root).
// x27 : context pointer (cp).
- // x28 : JS stack pointer (jssp).
// x29 : frame pointer (fp).
Handle<Code> builtin = is_construct
@@ -820,7 +819,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
// Drop receiver + arguments.
if (__ emit_debug_code()) {
__ Tst(args_size, kPointerSize - 1);
- __ Check(eq, kUnexpectedValue);
+ __ Check(eq, AbortReason::kUnexpectedValue);
}
__ Lsr(args_size, args_size, kPointerSizeLog2);
__ DropArguments(args_size);
@@ -873,6 +872,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
&fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
@@ -887,7 +889,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ Cmp(
optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
- __ Assert(eq, kExpectedOptimizationSentinel);
+ __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
__ B(&fallthrough);
}
@@ -967,7 +969,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ Add(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ B(&load_size);
// Load the size of the current bytecode.
__ Bind(&load_size);
@@ -985,7 +986,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
// - x3: the incoming new target or generator object
// - cp: our context.
// - fp: our caller's frame pointer.
-// - jssp: stack pointer.
// - lr: return address.
//
// The function builds an interpreter frame. See InterpreterFrameConstants in
@@ -1009,7 +1009,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ Push(lr, fp, cp, closure);
- __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+ __ Add(fp, __ StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
@@ -1030,11 +1030,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
- __ AssertNotSmi(kInterpreterBytecodeArrayRegister,
- kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ AssertNotSmi(
+ kInterpreterBytecodeArrayRegister,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, x0, x0,
BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Reset code age.
@@ -1058,8 +1060,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
- DCHECK(jssp.Is(__ StackPointer()));
- __ Sub(x10, jssp, Operand(x11));
+ __ Sub(x10, __ StackPointer(), Operand(x11));
__ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
__ B(hs, &ok);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -1181,10 +1182,19 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ Unreachable();
__ Bind(&done);
- // TODO(arm64): Claim one extra slot for padding and store padreg to the
- // padding slot.
+ // Round up to an even number of slots and claim them.
+ __ Add(slots_to_claim, slots_to_claim, 1);
+ __ Bic(slots_to_claim, slots_to_claim, 1);
__ Claim(slots_to_claim);
+ {
+ // Store padding, which may be overwritten.
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.AcquireX();
+ __ Sub(scratch, slots_to_claim, 1);
+ __ Poke(padreg, Operand(scratch, LSL, kPointerSizeLog2));
+ }
+
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Store "undefined" as the receiver arg if we need to.
Register receiver = x14;
@@ -1311,11 +1321,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
- __ AssertNotSmi(kInterpreterBytecodeArrayRegister,
- kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ AssertNotSmi(
+ kInterpreterBytecodeArrayRegister,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, x1, x1,
BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Get the target bytecode offset from the frame.
@@ -1375,7 +1387,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+ __ Assert(ne, AbortReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
@@ -1634,7 +1646,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
kPointerSize;
// Set up frame pointer.
- __ Add(fp, jssp, frame_size);
+ __ Add(fp, __ StackPointer(), frame_size);
if (with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
@@ -1770,9 +1782,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
- // -- jssp[0] : argArray (if argc == 2)
- // -- jssp[8] : thisArg (if argc >= 1)
- // -- jssp[16] : receiver
+ // -- sp[0] : argArray (if argc == 2)
+ // -- sp[8] : thisArg (if argc >= 1)
+ // -- sp[16] : receiver
// -----------------------------------
ASM_LOCATION("Builtins::Generate_FunctionPrototypeApply");
@@ -1824,7 +1836,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x2 : argArray
// -- x1 : receiver
- // -- jssp[0] : thisArg
+ // -- sp[0] : thisArg
// -----------------------------------
// 2. We don't need to check explicitly for callable receiver here,
@@ -1855,55 +1867,65 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
Register argc = x0;
Register function = x1;
- Register scratch1 = x10;
- Register scratch2 = x11;
ASM_LOCATION("Builtins::Generate_FunctionPrototypeCall");
- // 1. Make sure we have at least one argument.
+ // 1. Get the callable to call (passed as receiver) from the stack.
+ __ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
+
+ // 2. Handle case with no arguments.
{
- Label done;
- __ Cbnz(argc, &done);
- __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
- __ Push(scratch1);
- __ Mov(argc, 1);
- __ Bind(&done);
+ Label non_zero;
+ Register scratch = x10;
+ __ Cbnz(argc, &non_zero);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ // Overwrite receiver with undefined, which will be the new receiver.
+ // We do not need to overwrite the padding slot above it with anything.
+ __ Poke(scratch, 0);
+ // Call function. The argument count is already zero.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Bind(&non_zero);
}
- // 2. Get the callable to call (passed as receiver) from the stack.
- __ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
+ // 3. Overwrite the receiver with padding. If argc is odd, this is all we
+ // need to do.
+ Label arguments_ready;
+ __ Poke(padreg, Operand(argc, LSL, kXRegSizeLog2));
+ __ Tbnz(argc, 0, &arguments_ready);
- // 3. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
+ // 4. If argc is even:
+ // Copy arguments two slots higher in memory, overwriting the original
+ // receiver and padding.
{
Label loop;
- // Calculate the copy start address (destination). Copy end address is jssp.
- __ SlotAddress(scratch2, argc);
- __ Sub(scratch1, scratch2, kPointerSize);
-
- __ Bind(&loop);
- __ Ldr(x12, MemOperand(scratch1, -kPointerSize, PostIndex));
- __ Str(x12, MemOperand(scratch2, -kPointerSize, PostIndex));
- __ Cmp(scratch1, jssp);
- __ B(ge, &loop);
- // Adjust the actual number of arguments and remove the top element
- // (which is a copy of the last argument).
- __ Sub(argc, argc, 1);
- __ Drop(1);
+ Register copy_from = x10;
+ Register copy_to = x11;
+ Register count = x12;
+ Register last_arg_slot = x13;
+ __ Mov(count, argc);
+ __ Sub(last_arg_slot, argc, 1);
+ __ SlotAddress(copy_from, last_arg_slot);
+ __ Add(copy_to, copy_from, 2 * kPointerSize);
+ __ CopyDoubleWords(copy_to, copy_from, count,
+ TurboAssembler::kSrcLessThanDst);
+ // Drop two slots. These are copies of the last two arguments.
+ __ Drop(2);
}
- // 4. Call the callable.
+ // 5. Adjust argument count to make the original first argument the new
+ // receiver and call the callable.
+ __ Bind(&arguments_ready);
+ __ Sub(argc, argc, 1);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
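Function.prototype.call drops the receiver from the argument list, so the rewritten builtin keeps the slot count even: for odd argc the receiver slot simply becomes padding, for even argc the arguments are copied two slots up and the bottom two slots are dropped. A small model of that slot shuffling, under the assumption that slots[0] is the slot at sp, the receiver sits at slots[argc], and padding is marked as -1; this is a sketch of the net effect, not V8 code:

#include <cassert>
#include <vector>

std::vector<long> StripReceiver(std::vector<long> slots, int argc) {
  const long kPad = -1;
  slots[argc] = kPad;                              // __ Poke(padreg, argc)
  if (argc % 2 == 1) return slots;                 // odd argc: padding is enough
  for (int i = argc - 1; i >= 0; --i)              // copy args two slots higher
    slots[i + 2] = slots[i];
  slots.erase(slots.begin(), slots.begin() + 2);   // __ Drop(2)
  return slots;
}

int main() {
  // argc == 2: args {20, 10}, receiver 99, caller-provided padding -1.
  std::vector<long> even = StripReceiver({20, 10, 99, -1}, 2);
  assert((even == std::vector<long>{20, 10}));     // args unchanged, two slots gone
  // argc == 3: args {30, 20, 10}, receiver 99; only the receiver is padded over.
  std::vector<long> odd = StripReceiver({30, 20, 10, 99}, 3);
  assert((odd == std::vector<long>{30, 20, 10, -1}));
  return 0;
}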
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
- // -- jssp[0] : argumentsList (if argc == 3)
- // -- jssp[8] : thisArgument (if argc >= 2)
- // -- jssp[16] : target (if argc >= 1)
- // -- jssp[24] : receiver
+ // -- sp[0] : argumentsList (if argc == 3)
+ // -- sp[8] : thisArgument (if argc >= 2)
+ // -- sp[16] : target (if argc >= 1)
+ // -- sp[24] : receiver
// -----------------------------------
ASM_LOCATION("Builtins::Generate_ReflectApply");
@@ -1962,7 +1984,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x2 : argumentsList
// -- x1 : target
- // -- jssp[0] : thisArgument
+ // -- sp[0] : thisArgument
// -----------------------------------
// 2. We don't need to check explicitly for callable target here,
@@ -1977,10 +1999,10 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
- // -- jssp[0] : new.target (optional)
- // -- jssp[8] : argumentsList
- // -- jssp[16] : target
- // -- jssp[24] : receiver
+ // -- sp[0] : new.target (optional)
+ // -- sp[8] : argumentsList
+ // -- sp[16] : target
+ // -- sp[24] : receiver
// -----------------------------------
ASM_LOCATION("Builtins::Generate_ReflectConstruct");
@@ -2044,7 +2066,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- x2 : argumentsList
// -- x1 : target
// -- x3 : new.target
- // -- jssp[0] : receiver (undefined)
+ // -- sp[0] : receiver (undefined)
// -----------------------------------
// 2. We don't need to check explicitly for constructor target here,
@@ -2060,25 +2082,26 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+namespace {
+
+void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ Push(lr, fp);
__ Mov(x11, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
__ Push(x11, x1); // x1: function
- // We do not yet push the number of arguments, to maintain a 16-byte aligned
- // stack pointer. This is done in step (3) in
- // Generate_ArgumentsAdaptorTrampoline.
- __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+ __ SmiTag(x11, x0); // x0: number of arguments.
+ __ Push(x11, padreg);
+ __ Add(fp, __ StackPointer(),
+ ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp);
}
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : result being passed through
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then drop the parameters and the receiver.
- __ Ldr(x10, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize)));
- __ Mov(jssp, fp);
+ __ Ldr(x10, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Mov(__ StackPointer(), fp);
__ Pop(fp, lr);
// Drop actual parameters and receiver.
@@ -2086,6 +2109,67 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ DropArguments(x10, TurboAssembler::kCountExcludesReceiver);
}
+// Prepares the stack for copying the varargs. First we claim the necessary
+// slots, taking care of potential padding. Then we copy the existing arguments
+// one slot up or one slot down, as needed.
+void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
+ Register len) {
+ Label len_odd, exit;
+ Register slots_to_copy = x10; // If needed.
+ __ Add(slots_to_copy, argc, 1);
+ __ Add(argc, argc, len);
+ __ Tbnz(len, 0, &len_odd);
+ __ Claim(len);
+ __ B(&exit);
+
+ __ Bind(&len_odd);
+ // Claim space we need. If argc is even, slots_to_claim = len + 1, as we need
+ // one extra padding slot. If argc is odd, we know that the original arguments
+ // will have a padding slot we can reuse (since len is odd), so
+ // slots_to_claim = len - 1.
+ {
+ Register scratch = x11;
+ Register slots_to_claim = x12;
+ __ Add(slots_to_claim, len, 1);
+ __ And(scratch, argc, 1);
+ __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
+ __ Claim(slots_to_claim);
+ }
+
+ Label copy_down;
+ __ Tbz(slots_to_copy, 0, &copy_down);
+
+ // Copy existing arguments one slot up.
+ {
+ Register src = x11;
+ Register dst = x12;
+ Register scratch = x13;
+ __ Sub(scratch, argc, 1);
+ __ SlotAddress(src, scratch);
+ __ SlotAddress(dst, argc);
+ __ CopyDoubleWords(dst, src, slots_to_copy,
+ TurboAssembler::kSrcLessThanDst);
+ }
+ __ B(&exit);
+
+ // Copy existing arguments one slot down and add padding.
+ __ Bind(&copy_down);
+ {
+ Register src = x11;
+ Register dst = x12;
+ Register scratch = x13;
+ __ Add(src, len, 1);
+ __ Mov(dst, len); // CopySlots will corrupt dst.
+ __ CopySlots(dst, src, slots_to_copy);
+ __ Add(scratch, argc, 1);
+ __ Poke(padreg, Operand(scratch, LSL, kPointerSizeLog2)); // Store padding.
+ }
+
+ __ Bind(&exit);
+}
+
+} // namespace
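The helper's claim size depends on the parity of both the vararg count and the existing argument count, as the comments above describe. A standalone sketch of just that accounting (not V8 code; argc excludes the receiver, as in the helper):

#include <cassert>

// Slots Generate_PrepareForCopyingVarargs claims for 'len' varargs when
// 'argc' arguments (plus receiver and possible padding) are already pushed.
int SlotsToClaim(int argc, int len) {
  if (len % 2 == 0) return len;                  // even: alignment unchanged
  return (argc % 2 == 0) ? len + 1 : len - 1;    // odd: add or reuse padding
}

int main() {
  assert(SlotsToClaim(2, 4) == 4);
  assert(SlotsToClaim(2, 3) == 4);  // argc even, len odd: extra padding slot
  assert(SlotsToClaim(3, 3) == 2);  // argc odd, len odd: reuse existing padding
  return 0;
}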
+
// static
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
@@ -2118,30 +2202,34 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Bind(&done);
}
- // Push arguments onto the stack (thisArgument is already on the stack).
- {
- Label done, push, loop;
- Register src = x5;
+ // Skip argument setup if we don't need to push any varargs.
+ Label done;
+ __ Cbz(len, &done);
- __ Add(src, arguments_list, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(argc, argc, len); // The 'len' argument for Call() or Construct().
- __ Cbz(len, &done);
+ Generate_PrepareForCopyingVarargs(masm, argc, len);
+
+ // Push varargs.
+ {
+ Label loop;
+ Register src = x10;
Register the_hole_value = x11;
Register undefined_value = x12;
- // We do not use the CompareRoot macro as it would do a LoadRoot behind the
- // scenes and we want to avoid that in a loop.
+ Register scratch = x13;
+ __ Add(src, arguments_list, FixedArray::kHeaderSize - kHeapObjectTag);
__ LoadRoot(the_hole_value, Heap::kTheHoleValueRootIndex);
__ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
- __ Claim(len);
+ // We do not use the CompareRoot macro as it would do a LoadRoot behind the
+ // scenes and we want to avoid that in a loop.
+ // TODO(all): Consider using Ldp and Stp.
__ Bind(&loop);
__ Sub(len, len, 1);
- __ Ldr(x10, MemOperand(src, kPointerSize, PostIndex));
- __ Cmp(x10, the_hole_value);
- __ Csel(x10, x10, undefined_value, ne);
- __ Poke(x10, Operand(len, LSL, kPointerSizeLog2));
+ __ Ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
+ __ Cmp(scratch, the_hole_value);
+ __ Csel(scratch, scratch, undefined_value, ne);
+ __ Poke(scratch, Operand(len, LSL, kPointerSizeLog2));
__ Cbnz(len, &loop);
- __ Bind(&done);
}
+ __ Bind(&done);
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
@@ -2158,13 +2246,16 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// -- x2 : start index (to support rest parameters)
// -----------------------------------
+ Register argc = x0;
+ Register start_index = x2;
+
// Check if new.target has a [[Construct]] internal method.
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(x3, &new_target_not_constructor);
__ Ldr(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
__ Ldrb(x5, FieldMemOperand(x5, Map::kBitFieldOffset));
- __ TestAndBranchIfAnySet(x5, 1 << Map::kIsConstructor,
+ __ TestAndBranchIfAnySet(x5, Map::IsConstructorBit::kMask,
&new_target_constructor);
__ Bind(&new_target_not_constructor);
{
@@ -2177,49 +2268,57 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
}
// Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ Ldr(x5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x4, MemOperand(x5, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Cmp(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(eq, &arguments_adaptor);
- {
- __ Ldr(x6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x6, FieldMemOperand(x6, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrsw(x6, FieldMemOperand(
- x6, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Mov(x5, fp);
- }
- __ B(&arguments_done);
- __ Bind(&arguments_adaptor);
+ // args_fp will point to the frame that contains the actual arguments, which
+ // will be the current frame unless we have an arguments adaptor frame, in
+ // which case args_fp points to the arguments adaptor frame.
+ Register args_fp = x5;
+ Register len = x6;
{
- // Just load the length from ArgumentsAdaptorFrame.
- __ Ldrsw(x6, UntagSmiMemOperand(
- x5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ Label arguments_adaptor, arguments_done;
+ Register scratch = x10;
+ __ Ldr(args_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(x4, MemOperand(args_fp,
+ CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ Cmp(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(eq, &arguments_adaptor);
+ {
+ __ Ldr(scratch,
+ MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(scratch,
+ FieldMemOperand(scratch, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldrsw(len,
+ FieldMemOperand(
+ scratch, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Mov(args_fp, fp);
+ }
+ __ B(&arguments_done);
+ __ Bind(&arguments_adaptor);
+ {
+ // Just load the length from ArgumentsAdaptorFrame.
+ __ Ldrsw(len,
+ UntagSmiMemOperand(
+ args_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ }
+ __ Bind(&arguments_done);
}
- __ Bind(&arguments_done);
Label stack_done, stack_overflow;
- __ Subs(x6, x6, x2);
+ __ Subs(len, len, start_index);
__ B(le, &stack_done);
- {
- // Check for stack overflow.
- Generate_StackOverflowCheck(masm, x6, &stack_overflow);
+ // Check for stack overflow.
+ Generate_StackOverflowCheck(masm, x6, &stack_overflow);
- // Forward the arguments from the caller frame.
- {
- Label loop;
- __ Add(x5, x5, kPointerSize);
- __ Add(x0, x0, x6);
- __ Bind(&loop);
- {
- __ Ldr(x4, MemOperand(x5, x6, LSL, kPointerSizeLog2));
- __ Push(x4);
- __ Subs(x6, x6, 1);
- __ B(ne, &loop);
- }
- }
+ Generate_PrepareForCopyingVarargs(masm, argc, len);
+
+ // Push varargs.
+ {
+ Register dst = x13;
+ __ Add(args_fp, args_fp, 2 * kPointerSize);
+ __ SlotAddress(dst, 0);
+ __ CopyDoubleWords(dst, args_fp, len);
}
__ B(&stack_done);
+
__ Bind(&stack_overflow);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ Bind(&stack_done);
@@ -2338,12 +2437,16 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// -- x3 : new.target (only in case of [[Construct]])
// -----------------------------------
+ Register bound_argc = x4;
+ Register bound_argv = x2;
+
// Load [[BoundArguments]] into x2 and length of that into x4.
Label no_bound_arguments;
- __ Ldr(x2, FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
- __ Ldrsw(x4, UntagSmiFieldMemOperand(x2, FixedArray::kLengthOffset));
- __ Cmp(x4, 0);
- __ B(eq, &no_bound_arguments);
+ __ Ldr(bound_argv,
+ FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
+ __ Ldrsw(bound_argc,
+ UntagSmiFieldMemOperand(bound_argv, FixedArray::kLengthOffset));
+ __ Cbz(bound_argc, &no_bound_arguments);
{
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
@@ -2353,44 +2456,97 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// -- x4 : the number of [[BoundArguments]]
// -----------------------------------
+ Register argc = x0;
+
+ // Check for stack overflow.
{
- Label done;
- __ Claim(x4);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
- __ CompareRoot(jssp, Heap::kRealStackLimitRootIndex);
+ Label done;
+ __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
+ // Make x10 the space we have left. The stack might already be overflowed
+ // here which will cause x10 to become negative.
+ __ Sub(x10, masm->StackPointer(), x10);
+ // Check if the arguments will overflow the stack.
+ __ Cmp(x10, Operand(bound_argc, LSL, kPointerSizeLog2));
__ B(gt, &done); // Signed comparison.
- // Restore the stack pointer.
- __ Drop(x4);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- }
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
__ Bind(&done);
}
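The rewritten check no longer claims the slots first; it computes the space left below the real stack limit and compares it against what the bound arguments need, tail-calling Runtime::kThrowStackOverflow on failure. The comparison as a plain-C++ sketch (pointer size assumed to be 8 bytes):

#include <cassert>
#include <cstdint>

// True when pushing bound_argc extra slots would cross the real stack limit.
// The subtraction is kept signed because sp may already be below the limit.
bool WouldOverflow(std::uintptr_t sp, std::uintptr_t real_stack_limit,
                   std::uint64_t bound_argc) {
  std::int64_t space_left = static_cast<std::int64_t>(sp) -
                            static_cast<std::int64_t>(real_stack_limit);
  return space_left <= static_cast<std::int64_t>(bound_argc << 3);
}

int main() {
  assert(!WouldOverflow(0x8000, 0x7000, 2));  // plenty of room
  assert(WouldOverflow(0x7008, 0x7000, 2));   // only one slot left
  return 0;
}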
- UseScratchRegisterScope temps(masm);
- Register argc = temps.AcquireX();
- // Relocate arguments down the stack.
- __ Mov(argc, x0);
- __ CopySlots(0, x4, argc);
+ // Check if we need padding.
+ Label copy_args, copy_bound_args;
+ Register total_argc = x15;
+ Register slots_to_claim = x12;
+ __ Add(total_argc, argc, bound_argc);
+ __ Mov(slots_to_claim, bound_argc);
+ __ Tbz(bound_argc, 0, &copy_args);
+
+ // Load receiver before we start moving the arguments. We will only
+ // need this in this path because the bound arguments are odd.
+ Register receiver = x14;
+ __ Peek(receiver, Operand(argc, LSL, kPointerSizeLog2));
- // Copy [[BoundArguments]] to the stack (below the arguments). The first
- // element of the array is copied to the highest address.
+ // Claim space we need. If argc is even, slots_to_claim = bound_argc + 1,
+ // as we need one extra padding slot. If argc is odd, we know that the
+ // original arguments will have a padding slot we can reuse (since
+ // bound_argc is odd), so slots_to_claim = bound_argc - 1.
{
- Label loop;
- __ Ldrsw(x4, UntagSmiFieldMemOperand(x2, FixedArray::kLengthOffset));
- __ Add(x2, x2, FixedArray::kHeaderSize - kHeapObjectTag);
- __ SlotAddress(x11, x0);
- __ Add(x0, x0, x4);
- __ Bind(&loop);
- __ Sub(x4, x4, 1);
- __ Ldr(x10, MemOperand(x2, x4, LSL, kPointerSizeLog2));
- // Poke into claimed area of stack.
- __ Str(x10, MemOperand(x11, kPointerSize, PostIndex));
- __ Cbnz(x4, &loop);
+ Register scratch = x11;
+ __ Add(slots_to_claim, bound_argc, 1);
+ __ And(scratch, total_argc, 1);
+ __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
+ }
+
+ // Copy bound arguments.
+ __ Bind(&copy_args);
+ // Skip claim and copy of existing arguments in the special case where we
+ // do not need to claim any slots (this will be the case when
+ // bound_argc == 1 and the existing arguments have padding we can reuse).
+ __ Cbz(slots_to_claim, &copy_bound_args);
+ __ Claim(slots_to_claim);
+ {
+ Register count = x10;
+ // Relocate arguments to a lower address.
+ __ Mov(count, argc);
+ __ CopySlots(0, slots_to_claim, count);
+
+ __ Bind(&copy_bound_args);
+ // Copy [[BoundArguments]] to the stack (below the arguments). The first
+ // element of the array is copied to the highest address.
+ {
+ Label loop;
+ Register counter = x10;
+ Register scratch = x11;
+ Register copy_to = x12;
+ __ Add(bound_argv, bound_argv,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ __ SlotAddress(copy_to, argc);
+ __ Add(argc, argc,
+ bound_argc); // Update argc to include bound arguments.
+ __ Lsl(counter, bound_argc, kPointerSizeLog2);
+ __ Bind(&loop);
+ __ Sub(counter, counter, kPointerSize);
+ __ Ldr(scratch, MemOperand(bound_argv, counter));
+ // Poke into claimed area of stack.
+ __ Str(scratch, MemOperand(copy_to, kPointerSize, PostIndex));
+ __ Cbnz(counter, &loop);
+ }
+
+ {
+ Label done;
+ Register scratch = x10;
+ __ Tbz(bound_argc, 0, &done);
+ // Store receiver.
+ __ Add(scratch, __ StackPointer(),
+ Operand(total_argc, LSL, kPointerSizeLog2));
+ __ Str(receiver, MemOperand(scratch, kPointerSize, PostIndex));
+ __ Tbnz(total_argc, 0, &done);
+ // Store padding.
+ __ Str(padreg, MemOperand(scratch));
+ __ Bind(&done);
+ }
}
}
__ Bind(&no_bound_arguments);
@@ -2438,7 +2594,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target has a [[Call]] internal method.
__ Ldrb(x4, FieldMemOperand(x4, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x4, 1 << Map::kIsCallable, &non_callable);
+ __ TestAndBranchIfAllClear(x4, Map::IsCallableBit::kMask, &non_callable);
// Check if target is a proxy and call CallProxy external builtin
__ Cmp(x5, JS_PROXY_TYPE);
@@ -2533,7 +2689,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Check if target has a [[Construct]] internal method.
__ Ldrb(x2, FieldMemOperand(x4, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x2, 1 << Map::kIsConstructor, &non_constructor);
+ __ TestAndBranchIfAllClear(x2, Map::IsConstructorBit::kMask,
+ &non_constructor);
// Only dispatch to bound functions after checking whether they are
// constructors.
@@ -2605,19 +2762,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
- ASM_LOCATION("Builtins::Generate_AbortJS");
- // ----------- S t a t e -------------
- // -- x1 : message as String object
- // -- lr : return address
- // -----------------------------------
- MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
- __ PushArgument(x1);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbortJS);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
// ----------- S t a t e -------------
@@ -2651,14 +2795,16 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// 4 | num of | |
// | actual args | |
// |- - - - - - - - -| |
- // [5] | [padding] | |
+ // 5 | padding | |
// |-----------------+---- |
- // 5+pad | receiver | ^ |
+ // [6] | [padding] | ^ |
+ // |- - - - - - - - -| | |
+ // 6+pad | receiver | | |
// | (parameter 0) | | |
// |- - - - - - - - -| | |
- // 6+pad | parameter 1 | | |
+ // 7+pad | parameter 1 | | |
// |- - - - - - - - -| Frame slots ----> expected args
- // 7+pad | parameter 2 | | |
+ // 8+pad | parameter 2 | | |
// |- - - - - - - - -| | |
// | | | |
// ... | ... | | |
@@ -2671,7 +2817,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// | [undefined] | v <-- stack ptr v
// -----+-----------------+---------------------------------
//
- // There is an optional slot of padding to ensure stack alignment.
+ // There is an optional slot of padding above the receiver to ensure stack
+ // alignment of the arguments.
// If the number of expected arguments is larger than the number of actual
// arguments, the remaining expected slots will be filled with undefined.
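At the level of the arguments the callee's named parameters see, the adaptor's job is unchanged by the new padding slot: actual arguments are copied up to the expected count and any remaining expected slots read as undefined. A sketch of that visible effect (not V8 code):

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

std::vector<std::string> AdaptArguments(std::vector<std::string> actual,
                                        std::size_t expected) {
  actual.resize(expected, "undefined");  // drop extras or pad with undefined
  return actual;
}

int main() {
  assert((AdaptArguments({"a", "b"}, 4) ==
          std::vector<std::string>{"a", "b", "undefined", "undefined"}));
  assert((AdaptArguments({"a", "b", "c"}, 2) ==
          std::vector<std::string>{"a", "b"}));
  return 0;
}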
@@ -2695,10 +2842,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Register argc_unused_actual = x14;
Register scratch1 = x15, scratch2 = x16;
- // We need slots for the expected arguments, with two extra slots for the
- // number of actual arguments and the receiver.
+ // We need slots for the expected arguments, with one extra slot for the
+ // receiver.
__ RecordComment("-- Stack check --");
- __ Add(scratch1, argc_expected, 2);
+ __ Add(scratch1, argc_expected, 1);
Generate_StackOverflowCheck(masm, scratch1, &stack_overflow);
// Round up number of slots to be even, to maintain stack alignment.
@@ -2707,7 +2854,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Bic(scratch1, scratch1, 1);
__ Claim(scratch1, kPointerSize);
- __ Mov(copy_to, jssp);
+ __ Mov(copy_to, __ StackPointer());
// Preparing the expected arguments is done in four steps, the order of
// which is chosen so we can use LDP/STP and avoid conditional branches as
@@ -2738,7 +2885,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Bind(&enough_arguments);
// (2) Copy all of the actual arguments, or as many as we need.
+ Label skip_copy;
__ RecordComment("-- Copy actual arguments --");
+ __ Cbz(argc_to_copy, &skip_copy);
__ Add(copy_end, copy_to, Operand(argc_to_copy, LSL, kPointerSizeLog2));
__ Add(copy_from, fp, 2 * kPointerSize);
// Adjust for difference between actual and expected arguments.
@@ -2755,21 +2904,22 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Stp(scratch1, scratch2, MemOperand(copy_to, 2 * kPointerSize, PostIndex));
__ Cmp(copy_end, copy_to);
__ B(hi, &copy_2_by_2);
+ __ Bind(&skip_copy);
- // (3) Store number of actual arguments and padding. The padding might be
- // unnecessary, in which case it will be overwritten by the receiver.
- __ RecordComment("-- Store number of args and padding --");
- __ SmiTag(scratch1, argc_actual);
- __ Stp(xzr, scratch1, MemOperand(fp, -4 * kPointerSize));
+ // (3) Store padding, which might be overwritten by the receiver, if it is not
+ // necessary.
+ __ RecordComment("-- Store padding --");
+ __ Str(padreg, MemOperand(fp, -5 * kPointerSize));
- // (4) Store receiver. Calculate target address from jssp to avoid checking
+ // (4) Store receiver. Calculate target address from the sp to avoid checking
// for padding. Storing the receiver will overwrite either the extra slot
// we copied with the actual arguments, if we did copy one, or the padding we
// stored above.
__ RecordComment("-- Store receiver --");
__ Add(copy_from, fp, 2 * kPointerSize);
__ Ldr(scratch1, MemOperand(copy_from, argc_actual, LSL, kPointerSizeLog2));
- __ Str(scratch1, MemOperand(jssp, argc_expected, LSL, kPointerSizeLog2));
+ __ Str(scratch1,
+ MemOperand(__ StackPointer(), argc_expected, LSL, kPointerSizeLog2));
// Arguments have been adapted. Now call the entry point.
__ RecordComment("-- Call entry point --");
@@ -2805,10 +2955,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
- // Wasm code uses the csp. This builtin excepts to use the jssp.
- // Thus, move csp to jssp when entering this builtin (called from wasm).
- DCHECK(masm->StackPointer().is(jssp));
- __ Move(jssp, csp);
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2833,9 +2979,6 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ PopDRegList(fp_regs);
__ PopXRegList(gp_regs);
}
- // Move back to csp land. jssp now has the same value as when entering this
- // function, but csp might have changed in the runtime call.
- __ Move(csp, jssp);
// Now jump to the instructions of the returned code object.
__ Jump(x8);
}
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 5fec0abfa5..027baa2873 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -31,6 +31,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
typedef std::function<void(ArrayBuiltinCodeStubAssembler* masm)>
PostLoopAction;
+ enum class MissingPropertyMode { kSkip, kUseUndefined };
+
void FindResultGenerator() { a_.Bind(UndefinedConstant()); }
Node* FindProcessor(Node* k_value, Node* k) {
@@ -383,6 +385,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
const char* name, const BuiltinResultGenerator& generator,
const CallResultProcessor& processor, const PostLoopAction& action,
const Callable& slow_case_continuation,
+ MissingPropertyMode missing_property_mode,
ForEachDirection direction = ForEachDirection::kForward) {
Label non_array(this), array_changes(this, {&k_, &a_, &to_});
@@ -439,7 +442,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
generator(this);
- HandleFastElements(processor, action, &fully_spec_compliant_, direction);
+ HandleFastElements(processor, action, &fully_spec_compliant_, direction,
+ missing_property_mode);
BIND(&fully_spec_compliant_);
@@ -550,6 +554,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
void GenerateIteratingArrayBuiltinLoopContinuation(
const CallResultProcessor& processor, const PostLoopAction& action,
+ MissingPropertyMode missing_property_mode,
ForEachDirection direction = ForEachDirection::kForward) {
Label loop(this, {&k_, &a_, &to_});
Label after_loop(this);
@@ -558,11 +563,11 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
{
if (direction == ForEachDirection::kForward) {
// 8. Repeat, while k < len
- GotoIfNumericGreaterThanOrEqual(k(), len_, &after_loop);
+ GotoIfNumberGreaterThanOrEqual(k(), len_, &after_loop);
} else {
// OR
// 10. Repeat, while k >= 0
- GotoIfNumericGreaterThanOrEqual(SmiConstant(-1), k(), &after_loop);
+ GotoIfNumberGreaterThanOrEqual(SmiConstant(-1), k(), &after_loop);
}
Label done_element(this, &to_);
@@ -572,12 +577,15 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// index in the range [0, 2^32-1).
CSA_ASSERT(this, IsNumberArrayIndex(k()));
- // b. Let kPresent be HasProperty(O, Pk).
- // c. ReturnIfAbrupt(kPresent).
- Node* k_present = HasProperty(o(), k(), context(), kHasProperty);
+ if (missing_property_mode == MissingPropertyMode::kSkip) {
+ // b. Let kPresent be HasProperty(O, Pk).
+ // c. ReturnIfAbrupt(kPresent).
+ TNode<Oddball> k_present =
+ HasProperty(o(), k(), context(), kHasProperty);
- // d. If kPresent is true, then
- GotoIf(WordNotEqual(k_present, TrueConstant()), &done_element);
+ // d. If kPresent is true, then
+ GotoIf(IsFalse(k_present), &done_element);
+ }
// i. Let kValue be Get(O, Pk).
// ii. ReturnIfAbrupt(kValue).
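The new MissingPropertyMode separates two hole-handling behaviours: kSkip keeps the HasProperty check and skips absent indices (forEach-style), while kUseUndefined visits every index and substitutes undefined for holes, which is what find and findIndex below need. A standalone sketch of the distinction (not V8 code; holes are modelled with std::optional):

#include <cstddef>
#include <cstdio>
#include <optional>
#include <vector>

enum class MissingPropertyMode { kSkip, kUseUndefined };

void VisitElements(const std::vector<std::optional<int>>& elements,
                   MissingPropertyMode mode) {
  for (std::size_t k = 0; k < elements.size(); ++k) {
    if (!elements[k].has_value()) {  // a hole: HasProperty(O, Pk) would be false
      if (mode == MissingPropertyMode::kSkip) continue;
      std::printf("k=%zu value=undefined\n", k);
      continue;
    }
    std::printf("k=%zu value=%d\n", k, *elements[k]);
  }
}

int main() {
  VisitElements({1, std::nullopt, 3}, MissingPropertyMode::kSkip);          // 2 lines
  VisitElements({1, std::nullopt, 3}, MissingPropertyMode::kUseUndefined);  // 3 lines
  return 0;
}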
@@ -655,7 +663,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
void VisitAllFastElementsOneKind(ElementsKind kind,
const CallResultProcessor& processor,
Label* array_changed, ParameterMode mode,
- ForEachDirection direction) {
+ ForEachDirection direction,
+ MissingPropertyMode missing_property_mode) {
Comment("begin VisitAllFastElementsOneKind");
VARIABLE(original_map, MachineRepresentation::kTagged);
original_map.Bind(LoadMap(o()));
@@ -670,7 +679,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
list, start, end,
[=, &original_map](Node* index) {
k_.Bind(ParameterToTagged(index, mode));
- Label one_element_done(this), hole_element(this);
+ Label one_element_done(this), hole_element(this),
+ process_element(this);
// Check if o's map has changed during the callback. If so, we have to
// fall back to the slower spec implementation for the rest of the
@@ -693,24 +703,32 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
? FixedArray::kHeaderSize
: (FixedArray::kHeaderSize - kHeapObjectTag);
Node* offset = ElementOffsetFromIndex(index, kind, mode, base_size);
- Node* value = nullptr;
+ VARIABLE(value, MachineRepresentation::kTagged);
if (kind == PACKED_ELEMENTS) {
- value = LoadObjectField(elements, offset);
- GotoIf(WordEqual(value, TheHoleConstant()), &hole_element);
+ value.Bind(LoadObjectField(elements, offset));
+ GotoIf(WordEqual(value.value(), TheHoleConstant()), &hole_element);
} else {
Node* double_value =
LoadDoubleWithHoleCheck(elements, offset, &hole_element);
- value = AllocateHeapNumberWithValue(double_value);
+ value.Bind(AllocateHeapNumberWithValue(double_value));
}
- a_.Bind(processor(this, value, k()));
- Goto(&one_element_done);
+ Goto(&process_element);
BIND(&hole_element);
- // Check if o's prototype change unexpectedly has elements after the
- // callback in the case of a hole.
- BranchIfPrototypesHaveNoElements(o_map, &one_element_done,
- array_changed);
-
+ if (missing_property_mode == MissingPropertyMode::kSkip) {
+ // Check if o's prototype change unexpectedly has elements after
+ // the callback in the case of a hole.
+ BranchIfPrototypesHaveNoElements(o_map, &one_element_done,
+ array_changed);
+ } else {
+ value.Bind(UndefinedConstant());
+ Goto(&process_element);
+ }
+ BIND(&process_element);
+ {
+ a_.Bind(processor(this, value.value(), k()));
+ Goto(&one_element_done);
+ }
BIND(&one_element_done);
},
1, mode, advance_mode);
@@ -719,7 +737,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
void HandleFastElements(const CallResultProcessor& processor,
const PostLoopAction& action, Label* slow,
- ForEachDirection direction) {
+ ForEachDirection direction,
+ MissingPropertyMode missing_property_mode) {
Label switch_on_elements_kind(this), fast_elements(this),
maybe_double_elements(this), fast_double_elements(this);
@@ -742,7 +761,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&fast_elements);
{
VisitAllFastElementsOneKind(PACKED_ELEMENTS, processor, slow, mode,
- direction);
+ direction, missing_property_mode);
action(this);
@@ -757,7 +776,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&fast_double_elements);
{
VisitAllFastElementsOneKind(PACKED_DOUBLE_ELEMENTS, processor, slow, mode,
- direction);
+ direction, missing_property_mode);
action(this);
@@ -879,7 +898,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
ElementsKind source_elements_kind_ = ElementsKind::NO_ELEMENTS;
};
-TF_BUILTIN(FastArrayPop, CodeStubAssembler) {
+TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
Node* context = Parameter(BuiltinDescriptor::kContext);
CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
@@ -977,7 +996,7 @@ TF_BUILTIN(FastArrayPop, CodeStubAssembler) {
}
}
-TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
+TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
TVARIABLE(IntPtrT, arg_index);
Label default_label(this, &arg_index);
Label smi_transition(this);
@@ -1106,9 +1125,10 @@ TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
}
}
-class FastArraySliceCodeStubAssembler : public CodeStubAssembler {
+class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
public:
- explicit FastArraySliceCodeStubAssembler(compiler::CodeAssemblerState* state)
+ explicit ArrayPrototypeSliceCodeStubAssembler(
+ compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
Node* HandleFastSlice(Node* context, Node* array, Node* from, Node* count,
@@ -1245,11 +1265,11 @@ class FastArraySliceCodeStubAssembler : public CodeStubAssembler {
void CopyOneElement(Node* context, Node* o, Node* a, Node* p_k, Variable& n) {
// b. Let kPresent be HasProperty(O, Pk).
// c. ReturnIfAbrupt(kPresent).
- Node* k_present = HasProperty(o, p_k, context, kHasProperty);
+ TNode<Oddball> k_present = HasProperty(o, p_k, context, kHasProperty);
// d. If kPresent is true, then
Label done_element(this);
- GotoIf(WordNotEqual(k_present, TrueConstant()), &done_element);
+ GotoIf(IsFalse(k_present), &done_element);
// i. Let kValue be Get(O, Pk).
// ii. ReturnIfAbrupt(kValue).
@@ -1264,10 +1284,10 @@ class FastArraySliceCodeStubAssembler : public CodeStubAssembler {
}
};
-TF_BUILTIN(FastArraySlice, FastArraySliceCodeStubAssembler) {
+TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
Node* const argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
- Node* const context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Label slow(this, Label::kDeferred), fast_elements_kind(this);
CodeStubArguments args(this, argc);
@@ -1339,15 +1359,15 @@ TF_BUILTIN(FastArraySlice, FastArraySliceCodeStubAssembler) {
// 5. Let relativeStart be ToInteger(start).
// 6. ReturnIfAbrupt(relativeStart).
- Node* arg0 = args.GetOptionalArgumentValue(0, SmiConstant(0));
- Node* relative_start = ToInteger(context, arg0);
+ TNode<Object> arg0 = CAST(args.GetOptionalArgumentValue(0, SmiConstant(0)));
+ Node* relative_start = ToInteger_Inline(context, arg0);
// 7. If relativeStart < 0, let k be max((len + relativeStart),0);
// else let k be min(relativeStart, len.value()).
VARIABLE(k, MachineRepresentation::kTagged);
Label relative_start_positive(this), relative_start_done(this);
- GotoIfNumericGreaterThanOrEqual(relative_start, SmiConstant(0),
- &relative_start_positive);
+ GotoIfNumberGreaterThanOrEqual(relative_start, SmiConstant(0),
+ &relative_start_positive);
k.Bind(NumberMax(NumberAdd(len.value(), relative_start), NumberConstant(0)));
Goto(&relative_start_done);
BIND(&relative_start_positive);
@@ -1358,11 +1378,12 @@ TF_BUILTIN(FastArraySlice, FastArraySliceCodeStubAssembler) {
// 8. If end is undefined, let relativeEnd be len;
// else let relativeEnd be ToInteger(end).
// 9. ReturnIfAbrupt(relativeEnd).
- Node* end = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ TNode<Object> end =
+ CAST(args.GetOptionalArgumentValue(1, UndefinedConstant()));
Label end_undefined(this), end_done(this);
VARIABLE(relative_end, MachineRepresentation::kTagged);
GotoIf(WordEqual(end, UndefinedConstant()), &end_undefined);
- relative_end.Bind(ToInteger(context, end));
+ relative_end.Bind(ToInteger_Inline(context, end));
Goto(&end_done);
BIND(&end_undefined);
relative_end.Bind(len.value());
@@ -1373,8 +1394,8 @@ TF_BUILTIN(FastArraySlice, FastArraySliceCodeStubAssembler) {
// else let final be min(relativeEnd, len).
VARIABLE(final, MachineRepresentation::kTagged);
Label relative_end_positive(this), relative_end_done(this);
- GotoIfNumericGreaterThanOrEqual(relative_end.value(), NumberConstant(0),
- &relative_end_positive);
+ GotoIfNumberGreaterThanOrEqual(relative_end.value(), NumberConstant(0),
+ &relative_end_positive);
final.Bind(NumberMax(NumberAdd(len.value(), relative_end.value()),
NumberConstant(0)));
Goto(&relative_end_done);
@@ -1412,7 +1433,7 @@ TF_BUILTIN(FastArraySlice, FastArraySliceCodeStubAssembler) {
BIND(&loop);
{
// 15. Repeat, while k < final
- GotoIfNumericGreaterThanOrEqual(k.value(), final.value(), &after_loop);
+ GotoIfNumberGreaterThanOrEqual(k.value(), final.value(), &after_loop);
Node* p_k = k.value(); // ToString(context, k.value()) is no-op
@@ -1438,7 +1459,7 @@ TF_BUILTIN(FastArraySlice, FastArraySliceCodeStubAssembler) {
args.PopAndReturn(a);
}
-TF_BUILTIN(FastArrayShift, CodeStubAssembler) {
+TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
Node* context = Parameter(BuiltinDescriptor::kContext);
CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
@@ -1619,6 +1640,206 @@ TF_BUILTIN(CloneFastJSArray, ArrayBuiltinCodeStubAssembler) {
Return(CloneFastJSArray(context, array, mode));
}
+TF_BUILTIN(ArrayFindLoopContinuation, ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* array = Parameter(Descriptor::kArray);
+ Node* object = Parameter(Descriptor::kObject);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* to = Parameter(Descriptor::kTo);
+
+ InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
+ this_arg, array, object, initial_k,
+ len, to);
+
+ GenerateIteratingArrayBuiltinLoopContinuation(
+ &ArrayBuiltinCodeStubAssembler::FindProcessor,
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
+}
+
+// Continuation that is called after an eager deoptimization from TF (e.g. the
+// array changes during iteration).
+TF_BUILTIN(ArrayFindLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Return(CallBuiltin(Builtins::kArrayFindLoopContinuation, context, receiver,
+ callbackfn, this_arg, UndefinedConstant(), receiver,
+ initial_k, len, UndefinedConstant()));
+}
+
+// Continuation that is called after a lazy deoptimization from TF (e.g. the
+// callback function is no longer callable).
+TF_BUILTIN(ArrayFindLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Return(CallBuiltin(Builtins::kArrayFindLoopContinuation, context, receiver,
+ callbackfn, this_arg, UndefinedConstant(), receiver,
+ initial_k, len, UndefinedConstant()));
+}
+
+// Continuation that is called after a lazy deoptimization from TF that happens
+// right after the callback, whose return value must be handled before
+// iteration continues.
+TF_BUILTIN(ArrayFindLoopAfterCallbackLazyDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* found_value = Parameter(Descriptor::kFoundValue);
+ Node* is_found = Parameter(Descriptor::kIsFound);
+
+ // This custom lazy deopt point is right after the callback. find() needs
+ // to pick up at the next step, which is returning the element if the callback
+ // value is truthy. Otherwise, continue the search by calling the
+ // continuation.
+ Label if_true(this), if_false(this);
+ BranchIfToBooleanIsTrue(is_found, &if_true, &if_false);
+ BIND(&if_true);
+ Return(found_value);
+ BIND(&if_false);
+ Return(CallBuiltin(Builtins::kArrayFindLoopContinuation, context, receiver,
+ callbackfn, this_arg, UndefinedConstant(), receiver,
+ initial_k, len, UndefinedConstant()));
+}
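
For orientation, the loop that these find continuations re-enter behaves roughly like the following standalone C++ sketch. It models only the spec-level semantics under the assumption of a dense array of doubles; the names FindFrom and FindResult are illustrative and do not exist in V8.

#include <cstddef>
#include <functional>
#include <vector>

// Approximate semantics of ArrayFindLoopContinuation: starting at initial_k,
// invoke the predicate on each element and stop at the first truthy result.
struct FindResult {
  bool found;
  double value;
};

FindResult FindFrom(const std::vector<double>& receiver, std::size_t initial_k,
                    const std::function<bool(double, std::size_t)>& callbackfn) {
  for (std::size_t k = initial_k; k < receiver.size(); ++k) {
    double element = receiver[k];
    if (callbackfn(element, k)) {
      return {true, element};  // the "after callback" continuation returns here
    }
  }
  return {false, 0.0};  // exhausted: Array.prototype.find() yields undefined
}
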
+
+// ES #sec-array.prototype.find
+TF_BUILTIN(ArrayPrototypeFind, ArrayBuiltinCodeStubAssembler) {
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
+
+ InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
+ new_target, argc);
+
+ GenerateIteratingArrayBuiltinBody(
+ "Array.prototype.find",
+ &ArrayBuiltinCodeStubAssembler::FindResultGenerator,
+ &ArrayBuiltinCodeStubAssembler::FindProcessor,
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ Builtins::CallableFor(isolate(), Builtins::kArrayFindLoopContinuation),
+ MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
+}
+
+TF_BUILTIN(ArrayFindIndexLoopContinuation, ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* array = Parameter(Descriptor::kArray);
+ Node* object = Parameter(Descriptor::kObject);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* to = Parameter(Descriptor::kTo);
+
+ InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
+ this_arg, array, object, initial_k,
+ len, to);
+
+ GenerateIteratingArrayBuiltinLoopContinuation(
+ &ArrayBuiltinCodeStubAssembler::FindIndexProcessor,
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
+}
+
+TF_BUILTIN(ArrayFindIndexLoopEagerDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Return(CallBuiltin(Builtins::kArrayFindIndexLoopContinuation, context,
+ receiver, callbackfn, this_arg, SmiConstant(-1), receiver,
+ initial_k, len, UndefinedConstant()));
+}
+
+TF_BUILTIN(ArrayFindIndexLoopLazyDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Return(CallBuiltin(Builtins::kArrayFindIndexLoopContinuation, context,
+ receiver, callbackfn, this_arg, SmiConstant(-1), receiver,
+ initial_k, len, UndefinedConstant()));
+}
+
+TF_BUILTIN(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* found_value = Parameter(Descriptor::kFoundValue);
+ Node* is_found = Parameter(Descriptor::kIsFound);
+
+  // This custom lazy deopt point is right after the callback. findIndex()
+  // needs to pick up at the next step, which is returning the index if the
+  // callback value is truthy. Otherwise, continue the search by calling the
+  // continuation.
+ Label if_true(this), if_false(this);
+ BranchIfToBooleanIsTrue(is_found, &if_true, &if_false);
+ BIND(&if_true);
+ Return(found_value);
+ BIND(&if_false);
+ Return(CallBuiltin(Builtins::kArrayFindIndexLoopContinuation, context,
+ receiver, callbackfn, this_arg, SmiConstant(-1), receiver,
+ initial_k, len, UndefinedConstant()));
+}
+
+// ES #sec-array.prototype.findIndex
+TF_BUILTIN(ArrayPrototypeFindIndex, ArrayBuiltinCodeStubAssembler) {
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
+
+ InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
+ new_target, argc);
+
+ GenerateIteratingArrayBuiltinBody(
+ "Array.prototype.findIndex",
+ &ArrayBuiltinCodeStubAssembler::FindIndexResultGenerator,
+ &ArrayBuiltinCodeStubAssembler::FindIndexProcessor,
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ Builtins::CallableFor(isolate(),
+ Builtins::kArrayFindIndexLoopContinuation),
+ MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
+}
+
// ES #sec-get-%typedarray%.prototype.find
TF_BUILTIN(TypedArrayPrototypeFind, ArrayBuiltinCodeStubAssembler) {
Node* argc =
@@ -1678,7 +1899,8 @@ TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinLoopContinuation(
&ArrayBuiltinCodeStubAssembler::ForEachProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(ArrayForEachLoopEagerDeoptContinuation,
@@ -1690,11 +1912,9 @@ TF_BUILTIN(ArrayForEachLoopEagerDeoptContinuation,
Node* initial_k = Parameter(Descriptor::kInitialK);
Node* len = Parameter(Descriptor::kLength);
- Callable stub(Builtins::CallableFor(isolate(),
- Builtins::kArrayForEachLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, this_arg,
- UndefinedConstant(), receiver, initial_k, len,
- UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayForEachLoopContinuation, context, receiver,
+ callbackfn, this_arg, UndefinedConstant(), receiver,
+ initial_k, len, UndefinedConstant()));
}
TF_BUILTIN(ArrayForEachLoopLazyDeoptContinuation,
@@ -1706,11 +1926,9 @@ TF_BUILTIN(ArrayForEachLoopLazyDeoptContinuation,
Node* initial_k = Parameter(Descriptor::kInitialK);
Node* len = Parameter(Descriptor::kLength);
- Callable stub(Builtins::CallableFor(isolate(),
- Builtins::kArrayForEachLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, this_arg,
- UndefinedConstant(), receiver, initial_k, len,
- UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayForEachLoopContinuation, context, receiver,
+ callbackfn, this_arg, UndefinedConstant(), receiver,
+ initial_k, len, UndefinedConstant()));
}
TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) {
@@ -1731,8 +1949,8 @@ TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::ForEachResultGenerator,
&ArrayBuiltinCodeStubAssembler::ForEachProcessor,
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- Builtins::CallableFor(isolate(),
- Builtins::kArrayForEachLoopContinuation));
+ Builtins::CallableFor(isolate(), Builtins::kArrayForEachLoopContinuation),
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinCodeStubAssembler) {
@@ -1755,6 +1973,48 @@ TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
+TF_BUILTIN(ArraySomeLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* result = Parameter(Descriptor::kResult);
+
+  // This custom lazy deopt point is right after the callback. some() needs
+  // to pick up at the next step, which is either returning true if {result}
+  // is true or continuing to the next array element.
+ Label true_continue(this), false_continue(this);
+
+  // iii. If testResult is true, return true.
+ BranchIfToBooleanIsTrue(result, &true_continue, &false_continue);
+ BIND(&true_continue);
+ { Return(TrueConstant()); }
+ BIND(&false_continue);
+ {
+ // Increment k.
+ initial_k = NumberInc(initial_k);
+
+ Return(CallBuiltin(Builtins::kArraySomeLoopContinuation, context, receiver,
+ callbackfn, this_arg, FalseConstant(), receiver,
+ initial_k, len, UndefinedConstant()));
+ }
+}
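
The two continuations above resume Array.prototype.some after a deopt: the lazy variant first inspects the callback result that was already produced, returning true if it was truthy and otherwise advancing k before re-entering the loop; every() below is the symmetric dual. As a plain C++ approximation of the resumed loop (illustrative names only):

#include <cstddef>
#include <functional>
#include <vector>

// Approximate semantics of ArraySomeLoopContinuation: scan from initial_k and
// bail out with true on the first truthy callback result.
bool SomeFrom(const std::vector<double>& receiver, std::size_t initial_k,
              const std::function<bool(double, std::size_t)>& callbackfn) {
  for (std::size_t k = initial_k; k < receiver.size(); ++k) {
    if (callbackfn(receiver[k], k)) return true;  // truthy result: done
  }
  return false;  // no element matched
}
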
+
+TF_BUILTIN(ArraySomeLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Return(CallBuiltin(Builtins::kArraySomeLoopContinuation, context, receiver,
+ callbackfn, this_arg, FalseConstant(), receiver, initial_k,
+ len, UndefinedConstant()));
+}
+
TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1772,7 +2032,8 @@ TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinLoopContinuation(
&ArrayBuiltinCodeStubAssembler::SomeProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(ArraySome, ArrayBuiltinCodeStubAssembler) {
@@ -1793,7 +2054,8 @@ TF_BUILTIN(ArraySome, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::SomeResultGenerator,
&ArrayBuiltinCodeStubAssembler::SomeProcessor,
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- Builtins::CallableFor(isolate(), Builtins::kArraySomeLoopContinuation));
+ Builtins::CallableFor(isolate(), Builtins::kArraySomeLoopContinuation),
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinCodeStubAssembler) {
@@ -1816,6 +2078,49 @@ TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
+TF_BUILTIN(ArrayEveryLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* result = Parameter(Descriptor::kResult);
+
+ // This custom lazy deopt point is right after the callback. every() needs
+ // to pick up at the next step, which is either continuing to the next
+ // array element or returning false if {result} is false.
+ Label true_continue(this), false_continue(this);
+
+  // iii. If testResult is false, return false.
+ BranchIfToBooleanIsTrue(result, &true_continue, &false_continue);
+ BIND(&true_continue);
+ {
+ // Increment k.
+ initial_k = NumberInc(initial_k);
+
+ Return(CallBuiltin(Builtins::kArrayEveryLoopContinuation, context, receiver,
+ callbackfn, this_arg, TrueConstant(), receiver,
+ initial_k, len, UndefinedConstant()));
+ }
+ BIND(&false_continue);
+ { Return(FalseConstant()); }
+}
+
+TF_BUILTIN(ArrayEveryLoopEagerDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Return(CallBuiltin(Builtins::kArrayEveryLoopContinuation, context, receiver,
+ callbackfn, this_arg, TrueConstant(), receiver, initial_k,
+ len, UndefinedConstant()));
+}
+
TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1833,7 +2138,8 @@ TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinLoopContinuation(
&ArrayBuiltinCodeStubAssembler::EveryProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(ArrayEvery, ArrayBuiltinCodeStubAssembler) {
@@ -1854,7 +2160,8 @@ TF_BUILTIN(ArrayEvery, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::EveryResultGenerator,
&ArrayBuiltinCodeStubAssembler::EveryProcessor,
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- Builtins::CallableFor(isolate(), Builtins::kArrayEveryLoopContinuation));
+ Builtins::CallableFor(isolate(), Builtins::kArrayEveryLoopContinuation),
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinCodeStubAssembler) {
@@ -1894,7 +2201,38 @@ TF_BUILTIN(ArrayReduceLoopContinuation, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinLoopContinuation(
&ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction);
+ &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
+ MissingPropertyMode::kSkip);
+}
+
+TF_BUILTIN(ArrayReduceLoopEagerDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* accumulator = Parameter(Descriptor::kAccumulator);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Callable stub(
+ Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation));
+ Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
+ accumulator, receiver, initial_k, len, UndefinedConstant()));
+}
+
+TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* result = Parameter(Descriptor::kResult);
+
+ Callable stub(
+ Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation));
+ Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
+ result, receiver, initial_k, len, UndefinedConstant()));
}
TF_BUILTIN(ArrayReduce, ArrayBuiltinCodeStubAssembler) {
@@ -1915,7 +2253,8 @@ TF_BUILTIN(ArrayReduce, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
&ArrayBuiltinCodeStubAssembler::ReduceProcessor,
&ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
- Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation));
+ Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation),
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinCodeStubAssembler) {
@@ -1956,7 +2295,37 @@ TF_BUILTIN(ArrayReduceRightLoopContinuation, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinLoopContinuation(
&ArrayBuiltinCodeStubAssembler::ReduceProcessor,
&ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
- ForEachDirection::kReverse);
+ MissingPropertyMode::kSkip, ForEachDirection::kReverse);
+}
+
+TF_BUILTIN(ArrayReduceRightLoopEagerDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* accumulator = Parameter(Descriptor::kAccumulator);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Callable stub(Builtins::CallableFor(
+ isolate(), Builtins::kArrayReduceRightLoopContinuation));
+ Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
+ accumulator, receiver, initial_k, len, UndefinedConstant()));
+}
+
+TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* result = Parameter(Descriptor::kResult);
+
+ Callable stub(Builtins::CallableFor(
+ isolate(), Builtins::kArrayReduceRightLoopContinuation));
+ Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
+ result, receiver, initial_k, len, UndefinedConstant()));
}
TF_BUILTIN(ArrayReduceRight, ArrayBuiltinCodeStubAssembler) {
@@ -1979,7 +2348,7 @@ TF_BUILTIN(ArrayReduceRight, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
Builtins::CallableFor(isolate(),
Builtins::kArrayReduceRightLoopContinuation),
- ForEachDirection::kReverse);
+ MissingPropertyMode::kSkip, ForEachDirection::kReverse);
}
TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinCodeStubAssembler) {
@@ -2020,7 +2389,8 @@ TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinLoopContinuation(
&ArrayBuiltinCodeStubAssembler::FilterProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation,
@@ -2034,10 +2404,9 @@ TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation,
Node* len = Parameter(Descriptor::kLength);
Node* to = Parameter(Descriptor::kTo);
- Callable stub(
- Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, this_arg, array,
- receiver, initial_k, len, to));
+ Return(CallBuiltin(Builtins::kArrayFilterLoopContinuation, context, receiver,
+ callbackfn, this_arg, array, receiver, initial_k, len,
+ to));
}
TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation,
@@ -2077,10 +2446,9 @@ TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation,
// Increment k.
initial_k = NumberInc(initial_k);
- Callable stub(
- Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, this_arg, array,
- receiver, initial_k, len, to.value()));
+ Return(CallBuiltin(Builtins::kArrayFilterLoopContinuation, context, receiver,
+ callbackfn, this_arg, array, receiver, initial_k, len,
+ to.value()));
}
TF_BUILTIN(ArrayFilter, ArrayBuiltinCodeStubAssembler) {
@@ -2101,7 +2469,8 @@ TF_BUILTIN(ArrayFilter, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::FilterResultGenerator,
&ArrayBuiltinCodeStubAssembler::FilterProcessor,
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation));
+ Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation),
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinCodeStubAssembler) {
@@ -2121,7 +2490,8 @@ TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinLoopContinuation(
&ArrayBuiltinCodeStubAssembler::SpecCompliantMapProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
@@ -2133,10 +2503,9 @@ TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
Node* initial_k = Parameter(Descriptor::kInitialK);
Node* len = Parameter(Descriptor::kLength);
- Callable stub(
- Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, this_arg, array,
- receiver, initial_k, len, UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayMapLoopContinuation, context, receiver,
+ callbackfn, this_arg, array, receiver, initial_k, len,
+ UndefinedConstant()));
}
TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
@@ -2159,10 +2528,9 @@ TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
// Then we have to increment k before going on.
initial_k = NumberInc(initial_k);
- Callable stub(
- Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, this_arg, array,
- receiver, initial_k, len, UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayMapLoopContinuation, context, receiver,
+ callbackfn, this_arg, array, receiver, initial_k, len,
+ UndefinedConstant()));
}
TF_BUILTIN(ArrayMap, ArrayBuiltinCodeStubAssembler) {
@@ -2182,7 +2550,8 @@ TF_BUILTIN(ArrayMap, ArrayBuiltinCodeStubAssembler) {
"Array.prototype.map", &ArrayBuiltinCodeStubAssembler::MapResultGenerator,
&ArrayBuiltinCodeStubAssembler::FastMapProcessor,
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation));
+ Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation),
+ MissingPropertyMode::kSkip);
}
TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinCodeStubAssembler) {
@@ -2848,7 +3217,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
{
Label if_invalid(this, Label::kDeferred);
// A fast array iterator transitioned to a slow iterator during
- // iteration. Invalidate fast_array_iteration_prtoector cell to
+ // iteration. Invalidate fast_array_iteration_protector cell to
// prevent potential deopt loops.
StoreObjectFieldNoWriteBarrier(
iterator, JSArrayIterator::kIteratedObjectMapOffset,
@@ -2877,7 +3246,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
length = var_length.value();
}
- GotoIfNumericGreaterThanOrEqual(index, length, &set_done);
+ GotoIfNumberGreaterThanOrEqual(index, length, &set_done);
StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
NumberInc(index));
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 060696ee5d..0cdcb57a3f 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -161,6 +161,7 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(function_map),
IntPtrConstant(JSFunction::kSizeWithoutPrototype /
kPointerSize)));
+ STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
StoreMapNoWriteBarrier(function, function_map);
StoreObjectFieldRoot(function, JSObject::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index aec265dc35..392040c995 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -232,10 +232,9 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
TNode<Object> add_func = GetAddFunction(variant, context, collection);
IteratorBuiltinsAssembler iterator_assembler(this->state());
- TNode<Object> iterator =
- CAST(iterator_assembler.GetIterator(context, iterable));
+ IteratorRecord iterator = iterator_assembler.GetIterator(context, iterable);
- CSA_ASSERT(this, Word32BinaryNot(IsUndefined(iterator)));
+ CSA_ASSERT(this, Word32BinaryNot(IsUndefined(iterator.object)));
TNode<Object> fast_iterator_result_map =
LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 2722f7b7a7..5c3883a870 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -134,6 +134,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
BIND(&cell_done);
}
+ STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackVectorOffset,
literals_cell);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
@@ -457,10 +458,10 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
VARIABLE(var_properties, MachineRepresentation::kTagged);
{
Node* bit_field_3 = LoadMapBitField3(boilerplate_map);
- GotoIf(IsSetWord32<Map::Deprecated>(bit_field_3), call_runtime);
+ GotoIf(IsSetWord32<Map::IsDeprecatedBit>(bit_field_3), call_runtime);
// Directly copy over the property store for dict-mode boilerplates.
Label if_dictionary(this), if_fast(this), done(this);
- Branch(IsSetWord32<Map::DictionaryMap>(bit_field_3), &if_dictionary,
+ Branch(IsSetWord32<Map::IsDictionaryMapBit>(bit_field_3), &if_dictionary,
&if_fast);
BIND(&if_dictionary);
{
@@ -636,8 +637,8 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral(
CSA_ASSERT(this, IsMap(map));
// Ensure that slack tracking is disabled for the map.
STATIC_ASSERT(Map::kNoSlackTracking == 0);
- CSA_ASSERT(this,
- IsClearWord32<Map::ConstructionCounter>(LoadMapBitField3(map)));
+ CSA_ASSERT(
+ this, IsClearWord32<Map::ConstructionCounterBits>(LoadMapBitField3(map)));
Node* empty_fixed_array = EmptyFixedArrayConstant();
Node* result =
AllocateJSObjectFromMap(map, empty_fixed_array, empty_fixed_array);
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index 823e6ca937..98e0f2c8b2 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -99,10 +99,9 @@ TF_BUILTIN(NonPrimitiveToPrimitive_String, ConversionBuiltinsAssembler) {
}
TF_BUILTIN(StringToNumber, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
Node* input = Parameter(Descriptor::kArgument);
- Return(StringToNumber(context, input));
+ Return(StringToNumber(input));
}
TF_BUILTIN(ToName, CodeStubAssembler) {
@@ -145,10 +144,9 @@ TF_BUILTIN(ToNumber, CodeStubAssembler) {
// ES section #sec-tostring-applied-to-the-number-type
TF_BUILTIN(NumberToString, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
Node* input = Parameter(Descriptor::kArgument);
- Return(NumberToString(context, input));
+ Return(NumberToString(input));
}
// ES section #sec-tostring
@@ -330,7 +328,14 @@ TF_BUILTIN(ToInteger, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* input = Parameter(Descriptor::kArgument);
- Return(ToInteger(context, input));
+ Return(ToInteger(context, input, kNoTruncation));
+}
+
+TF_BUILTIN(ToInteger_TruncateMinusZero, CodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* input = Parameter(Descriptor::kArgument);
+
+ Return(ToInteger(context, input, kTruncateMinusZero));
}
// ES6 section 7.1.13 ToObject (argument)
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 2b2cc407b5..0ffd15df7c 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -91,8 +91,9 @@ namespace internal {
ASM(StackCheck) \
\
/* String helpers */ \
- TFC(StringCharAt, StringCharAt, 1) \
- TFC(StringCharCodeAt, StringCharCodeAt, 1) \
+ TFC(StringCharAt, StringAt, 1) \
+ TFC(StringCharCodeAt, StringAt, 1) \
+ TFC(StringCodePointAt, StringAt, 1) \
TFC(StringEqual, Compare, 1) \
TFC(StringGreaterThan, Compare, 1) \
TFC(StringGreaterThanOrEqual, Compare, 1) \
@@ -190,6 +191,7 @@ namespace internal {
TFC(NumberToString, TypeConversion, 1) \
TFC(ToString, TypeConversion, 1) \
TFC(ToInteger, TypeConversion, 1) \
+ TFC(ToInteger_TruncateMinusZero, TypeConversion, 1) \
TFC(ToLength, TypeConversion, 1) \
TFC(ClassOf, Typeof, 1) \
TFC(Typeof, Typeof, 1) \
@@ -199,26 +201,19 @@ namespace internal {
TFC(ToBooleanLazyDeoptContinuation, TypeConversionStackParameter, 1) \
\
/* Handlers */ \
- TFH(LoadICProtoArray, LoadICProtoArray) \
- TFH(LoadICProtoArrayThrowIfNonexistent, LoadICProtoArray) \
TFH(KeyedLoadIC_Megamorphic, LoadWithVector) \
- TFH(KeyedLoadIC_Miss, LoadWithVector) \
TFH(KeyedLoadIC_PolymorphicName, LoadWithVector) \
TFH(KeyedLoadIC_Slow, LoadWithVector) \
TFH(KeyedStoreIC_Megamorphic, StoreWithVector) \
- TFH(KeyedStoreIC_Miss, StoreWithVector) \
TFH(KeyedStoreIC_Slow, StoreWithVector) \
- TFH(LoadGlobalIC_Miss, LoadGlobalWithVector) \
- TFH(LoadGlobalIC_Slow, LoadGlobalWithVector) \
+ TFH(LoadGlobalIC_Slow, LoadWithVector) \
TFH(LoadField, LoadField) \
TFH(LoadIC_FunctionPrototype, LoadWithVector) \
- TFH(LoadIC_Miss, LoadWithVector) \
TFH(LoadIC_Slow, LoadWithVector) \
TFH(LoadIC_StringLength, LoadWithVector) \
TFH(LoadIC_StringWrapperLength, LoadWithVector) \
TFH(LoadIC_Uninitialized, LoadWithVector) \
TFH(StoreGlobalIC_Slow, StoreWithVector) \
- TFH(StoreIC_Miss, StoreWithVector) \
TFH(StoreIC_Uninitialized, StoreWithVector) \
\
/* Promise helpers */ \
@@ -226,6 +221,9 @@ namespace internal {
TFS(RejectNativePromise, kPromise, kValue, kDebugEvent) \
TFS(PerformNativePromiseThen, kPromise, kResolveReaction, kRejectReaction, \
kResultPromise) \
+ TFS(EnqueueMicrotask, kMicrotask) \
+ TFC(RunMicrotasks, RunMicrotasks, 1) \
+ TFS(PromiseResolveThenableJob, kMicrotask) \
\
/* Object property helpers */ \
TFS(HasProperty, kKey, kObject) \
@@ -233,7 +231,7 @@ namespace internal {
\
/* Abort */ \
ASM(Abort) \
- ASM(AbortJS) \
+ TFC(AbortJS, AbortJS, 1) \
\
/* Built-in functions for Javascript */ \
/* Special internal builtins */ \
@@ -255,16 +253,16 @@ namespace internal {
TFJ(ArrayIndexOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.pop */ \
CPP(ArrayPop) \
- TFJ(FastArrayPop, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayPrototypePop, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.push */ \
CPP(ArrayPush) \
- TFJ(FastArrayPush, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayPrototypePush, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.shift */ \
CPP(ArrayShift) \
- TFJ(FastArrayShift, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayPrototypeShift, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.slice */ \
CPP(ArraySlice) \
- TFJ(FastArraySlice, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayPrototypeSlice, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.splice */ \
CPP(ArraySplice) \
/* ES6 #sec-array.prototype.unshift */ \
@@ -283,10 +281,18 @@ namespace internal {
/* ES6 #sec-array.prototype.every */ \
TFS(ArrayEveryLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayEveryLoopEagerDeoptContinuation, 4, kCallbackFn, kThisArg, \
+ kInitialK, kLength) \
+ TFJ(ArrayEveryLoopLazyDeoptContinuation, 5, kCallbackFn, kThisArg, \
+ kInitialK, kLength, kResult) \
TFJ(ArrayEvery, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.some */ \
TFS(ArraySomeLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
kObject, kInitialK, kLength, kTo) \
+ TFJ(ArraySomeLoopEagerDeoptContinuation, 4, kCallbackFn, kThisArg, \
+ kInitialK, kLength) \
+ TFJ(ArraySomeLoopLazyDeoptContinuation, 5, kCallbackFn, kThisArg, kInitialK, \
+ kLength, kResult) \
TFJ(ArraySome, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.filter */ \
TFS(ArrayFilterLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
@@ -307,13 +313,42 @@ namespace internal {
/* ES6 #sec-array.prototype.reduce */ \
TFS(ArrayReduceLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
kAccumulator, kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayReduceLoopEagerDeoptContinuation, 4, kCallbackFn, kInitialK, \
+ kLength, kAccumulator) \
+ TFJ(ArrayReduceLoopLazyDeoptContinuation, 4, kCallbackFn, kInitialK, \
+ kLength, kResult) \
TFJ(ArrayReduce, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.reduceRight */ \
TFS(ArrayReduceRightLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
kAccumulator, kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayReduceRightLoopEagerDeoptContinuation, 4, kCallbackFn, kInitialK, \
+ kLength, kAccumulator) \
+ TFJ(ArrayReduceRightLoopLazyDeoptContinuation, 4, kCallbackFn, kInitialK, \
+ kLength, kResult) \
TFJ(ArrayReduceRight, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.entries */ \
TFJ(ArrayPrototypeEntries, 0) \
+ /* ES6 #sec-array.prototype.find */ \
+ TFS(ArrayFindLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
+ kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayFindLoopEagerDeoptContinuation, 4, kCallbackFn, kThisArg, \
+ kInitialK, kLength) \
+ TFJ(ArrayFindLoopLazyDeoptContinuation, 5, kCallbackFn, kThisArg, kInitialK, \
+ kLength, kResult) \
+ TFJ(ArrayFindLoopAfterCallbackLazyDeoptContinuation, 6, kCallbackFn, \
+ kThisArg, kInitialK, kLength, kFoundValue, kIsFound) \
+ TFJ(ArrayPrototypeFind, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 #sec-array.prototype.findIndex */ \
+ TFS(ArrayFindIndexLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
+ kArray, kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayFindIndexLoopEagerDeoptContinuation, 4, kCallbackFn, kThisArg, \
+ kInitialK, kLength) \
+ TFJ(ArrayFindIndexLoopLazyDeoptContinuation, 5, kCallbackFn, kThisArg, \
+ kInitialK, kLength, kResult) \
+ TFJ(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation, 6, kCallbackFn, \
+ kThisArg, kInitialK, kLength, kFoundValue, kIsFound) \
+ TFJ(ArrayPrototypeFindIndex, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.keys */ \
TFJ(ArrayPrototypeKeys, 0) \
/* ES6 #sec-array.prototype.values */ \
@@ -555,6 +590,8 @@ namespace internal {
TFH(LoadICTrampoline, Load) \
TFH(KeyedLoadIC, LoadWithVector) \
TFH(KeyedLoadICTrampoline, Load) \
+ TFH(StoreGlobalIC, StoreGlobalWithVector) \
+ TFH(StoreGlobalICTrampoline, StoreGlobal) \
TFH(StoreIC, StoreWithVector) \
TFH(StoreICTrampoline, Store) \
TFH(KeyedStoreIC, StoreWithVector) \
@@ -718,7 +755,7 @@ namespace internal {
CPP(ObjectDefineProperties) \
CPP(ObjectDefineProperty) \
CPP(ObjectDefineSetter) \
- CPP(ObjectEntries) \
+ TFJ(ObjectEntries, 1, kObject) \
CPP(ObjectFreeze) \
TFJ(ObjectGetOwnPropertyDescriptor, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -745,8 +782,10 @@ namespace internal {
CPP(ObjectPrototypePropertyIsEnumerable) \
CPP(ObjectPrototypeGetProto) \
CPP(ObjectPrototypeSetProto) \
+ /* ES #sec-object.prototype.tolocalestring */ \
+ TFJ(ObjectPrototypeToLocaleString, 0) \
CPP(ObjectSeal) \
- CPP(ObjectValues) \
+ TFJ(ObjectValues, 1, kObject) \
\
/* instanceof */ \
TFC(OrdinaryHasInstance, Compare, 1) \
@@ -771,13 +810,15 @@ namespace internal {
TFJ(PromiseRejectClosure, 1, kValue) \
TFJ(PromiseAllResolveElementClosure, 1, kValue) \
/* ES #sec-promise.prototype.then */ \
- TFJ(PromiseThen, 2, kOnFullfilled, kOnRejected) \
+ TFJ(PromisePrototypeThen, 2, kOnFullfilled, kOnRejected) \
/* ES #sec-promise.prototype.catch */ \
- TFJ(PromiseCatch, 1, kOnRejected) \
+ TFJ(PromisePrototypeCatch, 1, kOnRejected) \
/* ES #sec-fulfillpromise */ \
TFJ(ResolvePromise, 2, kPromise, kValue) \
TFS(PromiseHandleReject, kPromise, kOnReject, kException) \
- TFJ(PromiseHandle, 5, kValue, kHandler, kDeferredPromise, \
+ TFS(PromiseHandle, kValue, kHandler, kDeferredPromise, kDeferredOnResolve, \
+ kDeferredOnReject) \
+ TFJ(PromiseHandleJS, 5, kValue, kHandler, kDeferredPromise, \
kDeferredOnResolve, kDeferredOnReject) \
/* ES #sec-promise.resolve */ \
TFJ(PromiseResolveWrapper, 1, kValue) \
@@ -785,7 +826,7 @@ namespace internal {
/* ES #sec-promise.reject */ \
TFJ(PromiseReject, 1, kReason) \
TFJ(InternalPromiseReject, 3, kPromise, kReason, kDebugEvent) \
- TFJ(PromiseFinally, 1, kOnFinally) \
+ TFJ(PromisePrototypeFinally, 1, kOnFinally) \
TFJ(PromiseThenFinally, 1, kValue) \
TFJ(PromiseCatchFinally, 1, kReason) \
TFJ(PromiseValueThunkFinally, 0) \
@@ -799,6 +840,8 @@ namespace internal {
TFJ(ProxyConstructor, 0) \
TFJ(ProxyConstructor_ConstructStub, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ProxyRevocable, 2, kTarget, kHandler) \
+ TFJ(ProxyRevoke, 0) \
TFS(ProxyGetProperty, kProxy, kName, kReceiverValue) \
TFS(ProxyHasProperty, kProxy, kName) \
TFS(ProxySetProperty, kProxy, kName, kValue, kReceiverValue, kLanguageMode) \
diff --git a/deps/v8/src/builtins/builtins-function-gen.cc b/deps/v8/src/builtins/builtins-function-gen.cc
index 0b98a7169b..7c1db5093d 100644
--- a/deps/v8/src/builtins/builtins-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-function-gen.cc
@@ -6,7 +6,6 @@
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/frame-constants.h"
-#include "src/zone/zone-list-inl.h" // TODO(mstarzinger): Temporary cycle breaker.
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc
index 4d85be9f91..48c28ab730 100644
--- a/deps/v8/src/builtins/builtins-handler-gen.cc
+++ b/deps/v8/src/builtins/builtins-handler-gen.cc
@@ -23,17 +23,6 @@ TF_BUILTIN(LoadIC_StringWrapperLength, CodeStubAssembler) {
Return(LoadStringLengthAsSmi(string));
}
-TF_BUILTIN(KeyedLoadIC_Miss, CodeStubAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* name = Parameter(Descriptor::kName);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
-
- TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, name, slot,
- vector);
-}
-
TF_BUILTIN(KeyedLoadIC_Slow, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
@@ -52,18 +41,6 @@ void Builtins::Generate_StoreIC_Uninitialized(
StoreICUninitializedGenerator::Generate(state);
}
-TF_BUILTIN(KeyedStoreIC_Miss, CodeStubAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* name = Parameter(Descriptor::kName);
- Node* value = Parameter(Descriptor::kValue);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
-
- TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot, vector,
- receiver, name);
-}
-
TF_BUILTIN(KeyedStoreIC_Slow, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
@@ -78,15 +55,6 @@ TF_BUILTIN(KeyedStoreIC_Slow, CodeStubAssembler) {
receiver, name);
}
-TF_BUILTIN(LoadGlobalIC_Miss, CodeStubAssembler) {
- Node* name = Parameter(Descriptor::kName);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
-
- TailCallRuntime(Runtime::kLoadGlobalIC_Miss, context, name, slot, vector);
-}
-
TF_BUILTIN(LoadGlobalIC_Slow, CodeStubAssembler) {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
@@ -110,16 +78,6 @@ TF_BUILTIN(LoadIC_FunctionPrototype, CodeStubAssembler) {
TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name, slot, vector);
}
-TF_BUILTIN(LoadIC_Miss, CodeStubAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* name = Parameter(Descriptor::kName);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
-
- TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name, slot, vector);
-}
-
TF_BUILTIN(LoadIC_Slow, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
@@ -128,18 +86,6 @@ TF_BUILTIN(LoadIC_Slow, CodeStubAssembler) {
TailCallRuntime(Runtime::kGetProperty, context, receiver, name);
}
-TF_BUILTIN(StoreIC_Miss, CodeStubAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* name = Parameter(Descriptor::kName);
- Node* value = Parameter(Descriptor::kValue);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
-
- TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot, vector,
- receiver, name);
-}
-
TF_BUILTIN(StoreGlobalIC_Slow, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc
index 536a7f31ed..94613a6a32 100644
--- a/deps/v8/src/builtins/builtins-ic-gen.cc
+++ b/deps/v8/src/builtins/builtins-ic-gen.cc
@@ -29,6 +29,8 @@ IC_BUILTIN(LoadField)
IC_BUILTIN(KeyedLoadICTrampoline)
IC_BUILTIN(KeyedLoadIC_Megamorphic)
IC_BUILTIN(KeyedLoadIC_PolymorphicName)
+IC_BUILTIN(StoreGlobalIC)
+IC_BUILTIN(StoreGlobalICTrampoline)
IC_BUILTIN(StoreIC)
IC_BUILTIN(StoreICTrampoline)
IC_BUILTIN(KeyedStoreIC)
@@ -40,8 +42,6 @@ IC_BUILTIN_PARAM(LoadGlobalICTrampoline, LoadGlobalICTrampoline,
NOT_INSIDE_TYPEOF)
IC_BUILTIN_PARAM(LoadGlobalICInsideTypeofTrampoline, LoadGlobalICTrampoline,
INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LoadICProtoArray, LoadICProtoArray, false)
-IC_BUILTIN_PARAM(LoadICProtoArrayThrowIfNonexistent, LoadICProtoArray, true)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index bc9723700c..bb4b66e3a4 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/api.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
@@ -13,6 +14,9 @@
namespace v8 {
namespace internal {
+template <typename T>
+using TNode = compiler::TNode<T>;
+
// -----------------------------------------------------------------------------
// Interrupt and stack checks.
@@ -583,7 +587,7 @@ TF_BUILTIN(ForInFilter, CodeStubAssembler) {
CSA_ASSERT(this, IsString(key));
Label if_true(this), if_false(this);
- Node* result = HasProperty(object, key, context, kForInHasProperty);
+ TNode<Oddball> result = HasProperty(object, key, context, kForInHasProperty);
Branch(IsTrue(result), &if_true, &if_false);
BIND(&if_true);
@@ -607,5 +611,448 @@ TF_BUILTIN(SameValue, CodeStubAssembler) {
Return(FalseConstant());
}
+class InternalBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit InternalBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ TNode<IntPtrT> GetPendingMicrotaskCount();
+ void SetPendingMicrotaskCount(TNode<IntPtrT> count);
+
+ TNode<FixedArray> GetMicrotaskQueue();
+ void SetMicrotaskQueue(TNode<FixedArray> queue);
+
+ TNode<Context> GetCurrentContext();
+ void SetCurrentContext(TNode<Context> context);
+
+ void EnterMicrotaskContext(TNode<Context> context);
+ void LeaveMicrotaskContext();
+
+ TNode<Object> GetPendingException() {
+ auto ref = ExternalReference(kPendingExceptionAddress, isolate());
+ return TNode<Object>::UncheckedCast(
+ Load(MachineType::AnyTagged(), ExternalConstant(ref)));
+ }
+ void ClearPendingException() {
+ auto ref = ExternalReference(kPendingExceptionAddress, isolate());
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, ExternalConstant(ref),
+ TheHoleConstant());
+ }
+
+ TNode<Object> GetScheduledException() {
+ auto ref = ExternalReference::scheduled_exception_address(isolate());
+ return TNode<Object>::UncheckedCast(
+ Load(MachineType::AnyTagged(), ExternalConstant(ref)));
+ }
+ void ClearScheduledException() {
+ auto ref = ExternalReference::scheduled_exception_address(isolate());
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, ExternalConstant(ref),
+ TheHoleConstant());
+ }
+};
+
+TNode<IntPtrT> InternalBuiltinsAssembler::GetPendingMicrotaskCount() {
+ auto ref = ExternalReference::pending_microtask_count_address(isolate());
+ if (kIntSize == 8) {
+ return TNode<IntPtrT>::UncheckedCast(
+ Load(MachineType::Int64(), ExternalConstant(ref)));
+ } else {
+ Node* const value = Load(MachineType::Int32(), ExternalConstant(ref));
+ return ChangeInt32ToIntPtr(value);
+ }
+}
+
+void InternalBuiltinsAssembler::SetPendingMicrotaskCount(TNode<IntPtrT> count) {
+ auto ref = ExternalReference::pending_microtask_count_address(isolate());
+ auto rep = kIntSize == 8 ? MachineRepresentation::kWord64
+ : MachineRepresentation::kWord32;
+ if (kIntSize == 4 && kPointerSize == 8) {
+ Node* const truncated_count =
+ TruncateInt64ToInt32(TNode<Int64T>::UncheckedCast(count));
+ StoreNoWriteBarrier(rep, ExternalConstant(ref), truncated_count);
+ } else {
+ StoreNoWriteBarrier(rep, ExternalConstant(ref), count);
+ }
+}
+
+TNode<FixedArray> InternalBuiltinsAssembler::GetMicrotaskQueue() {
+ return TNode<FixedArray>::UncheckedCast(
+ LoadRoot(Heap::kMicrotaskQueueRootIndex));
+}
+
+void InternalBuiltinsAssembler::SetMicrotaskQueue(TNode<FixedArray> queue) {
+ StoreRoot(Heap::kMicrotaskQueueRootIndex, queue);
+}
+
+TNode<Context> InternalBuiltinsAssembler::GetCurrentContext() {
+ auto ref = ExternalReference(kContextAddress, isolate());
+ return TNode<Context>::UncheckedCast(
+ Load(MachineType::AnyTagged(), ExternalConstant(ref)));
+}
+
+void InternalBuiltinsAssembler::SetCurrentContext(TNode<Context> context) {
+ auto ref = ExternalReference(kContextAddress, isolate());
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, ExternalConstant(ref),
+ context);
+}
+
+void InternalBuiltinsAssembler::EnterMicrotaskContext(
+ TNode<Context> microtask_context) {
+ auto ref = ExternalReference::handle_scope_implementer_address(isolate());
+ Node* const hsi = Load(MachineType::Pointer(), ExternalConstant(ref));
+ StoreNoWriteBarrier(
+ MachineType::PointerRepresentation(), hsi,
+ IntPtrConstant(HandleScopeImplementerOffsets::kMicrotaskContext),
+ BitcastTaggedToWord(microtask_context));
+
+ // Load mirrored std::vector length from
+ // HandleScopeImplementer::entered_contexts_count_
+ auto type = kSizetSize == 8 ? MachineType::Uint64() : MachineType::Uint32();
+ Node* entered_contexts_length = Load(
+ type, hsi,
+ IntPtrConstant(HandleScopeImplementerOffsets::kEnteredContextsCount));
+
+ auto rep = kSizetSize == 8 ? MachineRepresentation::kWord64
+ : MachineRepresentation::kWord32;
+
+ StoreNoWriteBarrier(
+ rep, hsi,
+ IntPtrConstant(
+ HandleScopeImplementerOffsets::kEnteredContextCountDuringMicrotasks),
+ entered_contexts_length);
+}
+
+void InternalBuiltinsAssembler::LeaveMicrotaskContext() {
+ auto ref = ExternalReference::handle_scope_implementer_address(isolate());
+
+ Node* const hsi = Load(MachineType::Pointer(), ExternalConstant(ref));
+ StoreNoWriteBarrier(
+ MachineType::PointerRepresentation(), hsi,
+ IntPtrConstant(HandleScopeImplementerOffsets::kMicrotaskContext),
+ IntPtrConstant(0));
+ if (kSizetSize == 4) {
+ StoreNoWriteBarrier(
+ MachineRepresentation::kWord32, hsi,
+ IntPtrConstant(HandleScopeImplementerOffsets::
+ kEnteredContextCountDuringMicrotasks),
+ Int32Constant(0));
+ } else {
+ StoreNoWriteBarrier(
+ MachineRepresentation::kWord64, hsi,
+ IntPtrConstant(HandleScopeImplementerOffsets::
+ kEnteredContextCountDuringMicrotasks),
+ Int64Constant(0));
+ }
+}
+
+TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) {
+ Node* microtask = Parameter(Descriptor::kMicrotask);
+
+ TNode<IntPtrT> num_tasks = GetPendingMicrotaskCount();
+ TNode<IntPtrT> new_num_tasks = IntPtrAdd(num_tasks, IntPtrConstant(1));
+ TNode<FixedArray> queue = GetMicrotaskQueue();
+ TNode<IntPtrT> queue_length = LoadAndUntagFixedArrayBaseLength(queue);
+
+ Label if_append(this), if_grow(this), done(this);
+ Branch(WordEqual(num_tasks, queue_length), &if_grow, &if_append);
+
+ BIND(&if_grow);
+ {
+ // Determine the new queue length and check if we need to allocate
+ // in large object space (instead of just going to new space, where
+ // we also know that we don't need any write barriers for setting
+ // up the new queue object).
+ Label if_newspace(this), if_lospace(this, Label::kDeferred);
+ TNode<IntPtrT> new_queue_length =
+ IntPtrMax(IntPtrConstant(8), IntPtrAdd(num_tasks, num_tasks));
+ Branch(IntPtrLessThanOrEqual(new_queue_length,
+ IntPtrConstant(FixedArray::kMaxRegularLength)),
+ &if_newspace, &if_lospace);
+
+ BIND(&if_newspace);
+ {
+ // This is the likely case where the new queue fits into new space,
+ // and thus we don't need any write barriers for initializing it.
+ TNode<FixedArray> new_queue =
+ CAST(AllocateFixedArray(PACKED_ELEMENTS, new_queue_length));
+ CopyFixedArrayElements(PACKED_ELEMENTS, queue, new_queue, num_tasks,
+ SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(new_queue, num_tasks, microtask,
+ SKIP_WRITE_BARRIER);
+ FillFixedArrayWithValue(PACKED_ELEMENTS, new_queue, new_num_tasks,
+ new_queue_length, Heap::kUndefinedValueRootIndex);
+ SetMicrotaskQueue(new_queue);
+ Goto(&done);
+ }
+
+ BIND(&if_lospace);
+ {
+ // The fallback case where the new queue ends up in large object space.
+ TNode<FixedArray> new_queue = CAST(AllocateFixedArray(
+ PACKED_ELEMENTS, new_queue_length, INTPTR_PARAMETERS,
+ AllocationFlag::kAllowLargeObjectAllocation));
+ CopyFixedArrayElements(PACKED_ELEMENTS, queue, new_queue, num_tasks);
+ StoreFixedArrayElement(new_queue, num_tasks, microtask);
+ FillFixedArrayWithValue(PACKED_ELEMENTS, new_queue, new_num_tasks,
+ new_queue_length, Heap::kUndefinedValueRootIndex);
+ SetMicrotaskQueue(new_queue);
+ Goto(&done);
+ }
+ }
+
+ BIND(&if_append);
+ {
+ StoreFixedArrayElement(queue, num_tasks, microtask);
+ Goto(&done);
+ }
+
+ BIND(&done);
+ SetPendingMicrotaskCount(new_num_tasks);
+ Return(UndefinedConstant());
+}
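
The grow path above doubles the queue (with a floor of 8 slots) and switches to a large-object-space allocation once the doubled length exceeds FixedArray::kMaxRegularLength, because only the regular-sized new-space allocation can skip write barriers during initialization. Taken in isolation, the sizing decision looks roughly like this sketch; kMaxRegularLengthStandIn is a placeholder value, not the real V8 constant:

#include <algorithm>
#include <cstdint>

// Placeholder threshold standing in for FixedArray::kMaxRegularLength.
constexpr int64_t kMaxRegularLengthStandIn = 64 * 1024;

struct QueueGrowth {
  int64_t new_length;
  bool use_large_object_space;
};

// Mirror of the sizing logic in EnqueueMicrotask's grow path: double the
// queue, never allocate fewer than 8 slots, and spill to large-object space
// once the new length passes the regular-length limit.
QueueGrowth DecideQueueGrowth(int64_t num_tasks) {
  int64_t new_length = std::max<int64_t>(8, num_tasks + num_tasks);
  return {new_length, new_length > kMaxRegularLengthStandIn};
}
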
+
+TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) {
+ Label init_queue_loop(this);
+
+ Goto(&init_queue_loop);
+ BIND(&init_queue_loop);
+ {
+ TVARIABLE(IntPtrT, index, IntPtrConstant(0));
+ Label loop(this, &index);
+
+ TNode<IntPtrT> num_tasks = GetPendingMicrotaskCount();
+ ReturnIf(IntPtrEqual(num_tasks, IntPtrConstant(0)), UndefinedConstant());
+
+ TNode<FixedArray> queue = GetMicrotaskQueue();
+
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(
+ LoadAndUntagFixedArrayBaseLength(queue), num_tasks));
+ CSA_ASSERT(this, IntPtrGreaterThan(num_tasks, IntPtrConstant(0)));
+
+ SetPendingMicrotaskCount(IntPtrConstant(0));
+ SetMicrotaskQueue(
+ TNode<FixedArray>::UncheckedCast(EmptyFixedArrayConstant()));
+
+ Goto(&loop);
+ BIND(&loop);
+ {
+ TNode<HeapObject> microtask =
+ TNode<HeapObject>::UncheckedCast(LoadFixedArrayElement(queue, index));
+ index = IntPtrAdd(index, IntPtrConstant(1));
+
+ CSA_ASSERT(this, TaggedIsNotSmi(microtask));
+
+ TNode<Map> microtask_map = LoadMap(microtask);
+ TNode<Int32T> microtask_type = LoadMapInstanceType(microtask_map);
+
+ Label is_call_handler_info(this);
+ Label is_function(this);
+ Label is_promise_resolve_thenable_job(this);
+ Label is_promise_reaction_job(this);
+ Label is_unreachable(this);
+
+ int32_t case_values[] = {TUPLE3_TYPE, // CallHandlerInfo
+ JS_FUNCTION_TYPE,
+ PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE,
+ PROMISE_REACTION_JOB_INFO_TYPE};
+
+ Label* case_labels[] = {&is_call_handler_info, &is_function,
+ &is_promise_resolve_thenable_job,
+ &is_promise_reaction_job};
+
+ static_assert(arraysize(case_values) == arraysize(case_labels), "");
+ Switch(microtask_type, &is_unreachable, case_values, case_labels,
+ arraysize(case_labels));
+
+ BIND(&is_call_handler_info);
+ {
+ // Bailout to C++ slow path for the remainder of the loop.
+ auto index_ref =
+ ExternalReference(kMicrotaskQueueBailoutIndexAddress, isolate());
+ auto count_ref =
+ ExternalReference(kMicrotaskQueueBailoutCountAddress, isolate());
+ auto rep = kIntSize == 4 ? MachineRepresentation::kWord32
+ : MachineRepresentation::kWord64;
+
+ // index was pre-incremented, decrement for bailout to C++.
+ Node* value = IntPtrSub(index, IntPtrConstant(1));
+
+ if (kPointerSize == 4) {
+ DCHECK_EQ(kIntSize, 4);
+ StoreNoWriteBarrier(rep, ExternalConstant(index_ref), value);
+ StoreNoWriteBarrier(rep, ExternalConstant(count_ref), num_tasks);
+ } else {
+ Node* count = num_tasks;
+ if (kIntSize == 4) {
+ value = TruncateInt64ToInt32(value);
+ count = TruncateInt64ToInt32(count);
+ }
+ StoreNoWriteBarrier(rep, ExternalConstant(index_ref), value);
+ StoreNoWriteBarrier(rep, ExternalConstant(count_ref), count);
+ }
+
+ Return(queue);
+ }
+
+ BIND(&is_function);
+ {
+ Label cont(this);
+ VARIABLE(exception, MachineRepresentation::kTagged, TheHoleConstant());
+ TNode<Context> old_context = GetCurrentContext();
+ TNode<Context> fn_context = TNode<Context>::UncheckedCast(
+ LoadObjectField(microtask, JSFunction::kContextOffset));
+ TNode<Context> native_context =
+ TNode<Context>::UncheckedCast(LoadNativeContext(fn_context));
+ SetCurrentContext(native_context);
+ EnterMicrotaskContext(fn_context);
+ Node* const call = CallJS(CodeFactory::Call(isolate()), native_context,
+ microtask, UndefinedConstant());
+ GotoIfException(call, &cont);
+ Goto(&cont);
+ BIND(&cont);
+ LeaveMicrotaskContext();
+ SetCurrentContext(old_context);
+ Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
+ }
+
+ BIND(&is_promise_resolve_thenable_job);
+ {
+ VARIABLE(exception, MachineRepresentation::kTagged, TheHoleConstant());
+ TNode<Context> old_context = GetCurrentContext();
+ TNode<Context> microtask_context =
+ TNode<Context>::UncheckedCast(LoadObjectField(
+ microtask, PromiseResolveThenableJobInfo::kContextOffset));
+ TNode<Context> native_context =
+ TNode<Context>::UncheckedCast(LoadNativeContext(microtask_context));
+ SetCurrentContext(native_context);
+ EnterMicrotaskContext(microtask_context);
+
+ Label if_unhandled_exception(this), done(this);
+ Node* const ret = CallBuiltin(Builtins::kPromiseResolveThenableJob,
+ native_context, microtask);
+ GotoIfException(ret, &if_unhandled_exception, &exception);
+ Goto(&done);
+
+ BIND(&if_unhandled_exception);
+ CallRuntime(Runtime::kReportMessage, native_context, exception.value());
+ Goto(&done);
+
+ BIND(&done);
+ LeaveMicrotaskContext();
+ SetCurrentContext(old_context);
+
+ Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
+ }
+
+ BIND(&is_promise_reaction_job);
+ {
+ Label if_multiple(this);
+ Label if_single(this);
+
+ Node* const value =
+ LoadObjectField(microtask, PromiseReactionJobInfo::kValueOffset);
+ Node* const tasks =
+ LoadObjectField(microtask, PromiseReactionJobInfo::kTasksOffset);
+ Node* const deferred_promises = LoadObjectField(
+ microtask, PromiseReactionJobInfo::kDeferredPromiseOffset);
+ Node* const deferred_on_resolves = LoadObjectField(
+ microtask, PromiseReactionJobInfo::kDeferredOnResolveOffset);
+ Node* const deferred_on_rejects = LoadObjectField(
+ microtask, PromiseReactionJobInfo::kDeferredOnRejectOffset);
+
+ TNode<Context> old_context = GetCurrentContext();
+ TNode<Context> microtask_context = TNode<Context>::UncheckedCast(
+ LoadObjectField(microtask, PromiseReactionJobInfo::kContextOffset));
+ TNode<Context> native_context =
+ TNode<Context>::UncheckedCast(LoadNativeContext(microtask_context));
+ SetCurrentContext(native_context);
+ EnterMicrotaskContext(microtask_context);
+
+ Branch(IsFixedArray(deferred_promises), &if_multiple, &if_single);
+
+ BIND(&if_single);
+ {
+ CallBuiltin(Builtins::kPromiseHandle, native_context, value, tasks,
+ deferred_promises, deferred_on_resolves,
+ deferred_on_rejects);
+ LeaveMicrotaskContext();
+ SetCurrentContext(old_context);
+ Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
+ }
+
+ BIND(&if_multiple);
+ {
+ TVARIABLE(IntPtrT, inner_index, IntPtrConstant(0));
+ TNode<IntPtrT> inner_length =
+ LoadAndUntagFixedArrayBaseLength(deferred_promises);
+ Label inner_loop(this, &inner_index), done(this);
+
+ CSA_ASSERT(this, IntPtrGreaterThan(inner_length, IntPtrConstant(0)));
+ Goto(&inner_loop);
+ BIND(&inner_loop);
+ {
+ Node* const task = LoadFixedArrayElement(tasks, inner_index);
+ Node* const deferred_promise =
+ LoadFixedArrayElement(deferred_promises, inner_index);
+ Node* const deferred_on_resolve =
+ LoadFixedArrayElement(deferred_on_resolves, inner_index);
+ Node* const deferred_on_reject =
+ LoadFixedArrayElement(deferred_on_rejects, inner_index);
+ CallBuiltin(Builtins::kPromiseHandle, native_context, value, task,
+ deferred_promise, deferred_on_resolve,
+ deferred_on_reject);
+ inner_index = IntPtrAdd(inner_index, IntPtrConstant(1));
+ Branch(IntPtrLessThan(inner_index, inner_length), &inner_loop,
+ &done);
+ }
+ BIND(&done);
+
+ LeaveMicrotaskContext();
+ SetCurrentContext(old_context);
+
+ Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
+ }
+ }
+
+ BIND(&is_unreachable);
+ Unreachable();
+ }
+ }
+}
+
+TF_BUILTIN(PromiseResolveThenableJob, InternalBuiltinsAssembler) {
+ VARIABLE(exception, MachineRepresentation::kTagged, TheHoleConstant());
+ Callable call = CodeFactory::Call(isolate());
+ Label reject_promise(this, Label::kDeferred);
+ TNode<PromiseResolveThenableJobInfo> microtask =
+ TNode<PromiseResolveThenableJobInfo>::UncheckedCast(
+ Parameter(Descriptor::kMicrotask));
+ TNode<Context> context =
+ TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext));
+
+ TNode<JSReceiver> thenable = TNode<JSReceiver>::UncheckedCast(LoadObjectField(
+ microtask, PromiseResolveThenableJobInfo::kThenableOffset));
+ TNode<JSReceiver> then = TNode<JSReceiver>::UncheckedCast(
+ LoadObjectField(microtask, PromiseResolveThenableJobInfo::kThenOffset));
+ TNode<JSFunction> resolve = TNode<JSFunction>::UncheckedCast(LoadObjectField(
+ microtask, PromiseResolveThenableJobInfo::kResolveOffset));
+ TNode<JSFunction> reject = TNode<JSFunction>::UncheckedCast(
+ LoadObjectField(microtask, PromiseResolveThenableJobInfo::kRejectOffset));
+
+ Node* const result = CallJS(call, context, then, thenable, resolve, reject);
+ GotoIfException(result, &reject_promise, &exception);
+ Return(UndefinedConstant());
+
+ BIND(&reject_promise);
+ CallJS(call, context, reject, UndefinedConstant(), exception.value());
+ Return(UndefinedConstant());
+}
+
+TF_BUILTIN(AbortJS, CodeStubAssembler) {
+ Node* message = Parameter(Descriptor::kObject);
+ Node* reason = SmiConstant(0);
+ TailCallRuntime(Runtime::kAbortJS, reason, message);
+}
+
} // namespace internal
} // namespace v8
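
The stores above write the loop index and task count back to raw external counters before bailing out to C++, truncating to 32 bits when `int` is 32-bit on a 64-bit target. The following is a minimal standalone sketch of that width handling in plain C++ (not the CSA API; the counter names are illustrative stand-ins for the isolate-owned bailout addresses):

#include <cstdint>

// Illustrative external counters, standing in for the microtask bailout
// index/count addresses referenced from the CSA code above.
static int g_bailout_index = 0;
static int g_bailout_count = 0;

// Write an intptr-sized loop value into an int-sized counter, truncating
// when sizeof(int) < sizeof(intptr_t), mirroring the Word32/Word64 split.
static void StoreCounter(int* counter, intptr_t value) {
  if (sizeof(int) == 4 && sizeof(intptr_t) == 8) {
    *counter = static_cast<int32_t>(value);  // explicit truncation
  } else {
    *counter = static_cast<int>(value);
  }
}

void BailoutToCpp(intptr_t index, intptr_t num_tasks) {
  // The index was pre-incremented by the loop, so step it back first.
  StoreCounter(&g_bailout_index, index - 1);
  StoreCounter(&g_bailout_count, num_tasks);
}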
diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc
index 3c7956246b..88641b04e2 100644
--- a/deps/v8/src/builtins/builtins-intl-gen.cc
+++ b/deps/v8/src/builtins/builtins-intl-gen.cc
@@ -8,7 +8,6 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/code-stub-assembler.h"
-#include "src/zone/zone-list-inl.h" // TODO(mstarzinger): Temporary cycle breaker.
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index f186cf2d76..f6a6d85880 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -11,9 +11,10 @@ namespace internal {
using compiler::Node;
-Node* IteratorBuiltinsAssembler::GetIterator(Node* context, Node* object,
- Label* if_exception,
- Variable* exception) {
+IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
+ Node* object,
+ Label* if_exception,
+ Variable* exception) {
Node* method = GetProperty(context, object, factory()->iterator_symbol());
GotoIfException(method, if_exception, exception);
@@ -21,9 +22,9 @@ Node* IteratorBuiltinsAssembler::GetIterator(Node* context, Node* object,
Node* iterator = CallJS(callable, context, method, object);
GotoIfException(iterator, if_exception, exception);
- Label done(this), if_notobject(this, Label::kDeferred);
+ Label get_next(this), if_notobject(this, Label::kDeferred);
GotoIf(TaggedIsSmi(iterator), &if_notobject);
- Branch(IsJSReceiver(iterator), &done, &if_notobject);
+ Branch(IsJSReceiver(iterator), &get_next, &if_notobject);
BIND(&if_notobject);
{
@@ -34,24 +35,21 @@ Node* IteratorBuiltinsAssembler::GetIterator(Node* context, Node* object,
Unreachable();
}
- BIND(&done);
- return iterator;
+ BIND(&get_next);
+ Node* const next = GetProperty(context, iterator, factory()->next_string());
+ GotoIfException(next, if_exception, exception);
+
+ return IteratorRecord{TNode<JSReceiver>::UncheckedCast(iterator),
+ TNode<Object>::UncheckedCast(next)};
}
-Node* IteratorBuiltinsAssembler::IteratorStep(Node* context, Node* iterator,
- Label* if_done,
- Node* fast_iterator_result_map,
- Label* if_exception,
- Variable* exception) {
+Node* IteratorBuiltinsAssembler::IteratorStep(
+ Node* context, const IteratorRecord& iterator, Label* if_done,
+ Node* fast_iterator_result_map, Label* if_exception, Variable* exception) {
DCHECK_NOT_NULL(if_done);
-
- // IteratorNext
- Node* next_method = GetProperty(context, iterator, factory()->next_string());
- GotoIfException(next_method, if_exception, exception);
-
// 1. a. Let result be ? Invoke(iterator, "next", « »).
Callable callable = CodeFactory::Call(isolate());
- Node* result = CallJS(callable, context, next_method, iterator);
+ Node* result = CallJS(callable, context, iterator.next, iterator.object);
GotoIfException(result, if_exception, exception);
// 3. If Type(result) is not Object, throw a TypeError exception.
@@ -129,20 +127,20 @@ Node* IteratorBuiltinsAssembler::IteratorValue(Node* context, Node* result,
return var_value.value();
}
-void IteratorBuiltinsAssembler::IteratorCloseOnException(Node* context,
- Node* iterator,
- Label* if_exception,
- Variable* exception) {
+void IteratorBuiltinsAssembler::IteratorCloseOnException(
+ Node* context, const IteratorRecord& iterator, Label* if_exception,
+ Variable* exception) {
// Perform ES #sec-iteratorclose when an exception occurs. This simpler
// algorithm does not include redundant steps which are never reachable from
// the spec IteratorClose algorithm.
DCHECK_NOT_NULL(if_exception);
DCHECK_NOT_NULL(exception);
CSA_ASSERT(this, IsNotTheHole(exception->value()));
- CSA_ASSERT(this, IsJSReceiver(iterator));
+ CSA_ASSERT(this, IsJSReceiver(iterator.object));
// Let return be ? GetMethod(iterator, "return").
- Node* method = GetProperty(context, iterator, factory()->return_string());
+ Node* method =
+ GetProperty(context, iterator.object, factory()->return_string());
GotoIfException(method, if_exception, exception);
// If return is undefined, return Completion(completion).
@@ -152,7 +150,7 @@ void IteratorBuiltinsAssembler::IteratorCloseOnException(Node* context,
// Let innerResult be Call(return, iterator, « »).
// If an exception occurs, the original exception remains bound
Node* inner_result =
- CallJS(CodeFactory::Call(isolate()), context, method, iterator);
+ CallJS(CodeFactory::Call(isolate()), context, method, iterator.object);
GotoIfException(inner_result, if_exception, nullptr);
// (If completion.[[Type]] is throw) return Completion(completion).
@@ -160,9 +158,8 @@ void IteratorBuiltinsAssembler::IteratorCloseOnException(Node* context,
}
}
-void IteratorBuiltinsAssembler::IteratorCloseOnException(Node* context,
- Node* iterator,
- Variable* exception) {
+void IteratorBuiltinsAssembler::IteratorCloseOnException(
+ Node* context, const IteratorRecord& iterator, Variable* exception) {
Label rethrow(this, Label::kDeferred);
IteratorCloseOnException(context, iterator, &rethrow, exception);
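
The refactor above threads an iterator record (the iterator object plus its cached `next` method) through the helpers instead of re-fetching `next` on every step. Below is a minimal sketch of the same shape in plain C++, assuming generic stand-in types rather than the real TNode/JSReceiver classes:

#include <functional>
#include <optional>
#include <string>

// Stand-ins for tagged values and iterator results; purely illustrative.
struct Value {
  std::string payload;
};

struct StepResult {
  bool done;
  Value value;
};

// Mirrors the spec's Iterator Record: the iterator object together with
// its "next" method, looked up once in GetIterator.
struct IteratorRecord {
  Value object;
  std::function<StepResult()> next;
};

IteratorRecord GetIterator(Value iterable) {
  // Look up @@iterator and "next" once, then reuse them on every step.
  return IteratorRecord{iterable, [i = 0]() mutable {
                          return StepResult{i >= 3, Value{std::to_string(i++)}};
                        }};
}

std::optional<Value> IteratorStep(const IteratorRecord& record) {
  StepResult result = record.next();  // no repeated "next" property lookup
  if (result.done) return std::nullopt;
  return result.value;
}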
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
index 9eb332e926..42627b8437 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.h
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -19,16 +19,17 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
// https://tc39.github.io/ecma262/#sec-getiterator --- never used for
// @@asyncIterator.
- Node* GetIterator(Node* context, Node* object, Label* if_exception = nullptr,
- Variable* exception = nullptr);
+ IteratorRecord GetIterator(Node* context, Node* object,
+ Label* if_exception = nullptr,
+ Variable* exception = nullptr);
// https://tc39.github.io/ecma262/#sec-iteratorstep
// Returns `false` if the iterator is done, otherwise returns an
// iterator result.
// `fast_iterator_result_map` refers to the map for the JSIteratorResult
// object, loaded from the native context.
- Node* IteratorStep(Node* context, Node* iterator, Label* if_done,
- Node* fast_iterator_result_map = nullptr,
+ Node* IteratorStep(Node* context, const IteratorRecord& iterator,
+ Label* if_done, Node* fast_iterator_result_map = nullptr,
Label* if_exception = nullptr,
Variable* exception = nullptr);
@@ -42,9 +43,9 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
Variable* exception = nullptr);
// https://tc39.github.io/ecma262/#sec-iteratorclose
- void IteratorCloseOnException(Node* context, Node* iterator,
+ void IteratorCloseOnException(Node* context, const IteratorRecord& iterator,
Label* if_exception, Variable* exception);
- void IteratorCloseOnException(Node* context, Node* iterator,
+ void IteratorCloseOnException(Node* context, const IteratorRecord& iterator,
Variable* exception);
};
diff --git a/deps/v8/src/builtins/builtins-math-gen.cc b/deps/v8/src/builtins/builtins-math-gen.cc
index 706fa4f3a8..d588113cdd 100644
--- a/deps/v8/src/builtins/builtins-math-gen.cc
+++ b/deps/v8/src/builtins/builtins-math-gen.cc
@@ -8,7 +8,6 @@
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
-#include "src/zone/zone-list-inl.h" // TODO(mstarzinger): Temporary cycle breaker.
namespace v8 {
namespace internal {
@@ -162,7 +161,7 @@ void MathBuiltinsAssembler::MathMaxMin(
SloppyTNode<Float64T>),
double default_val) {
CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
- argc = arguments.GetLength();
+ argc = arguments.GetLength(INTPTR_PARAMETERS);
VARIABLE(result, MachineRepresentation::kFloat64);
result.Bind(Float64Constant(default_val));
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 65170d321d..9e344820dc 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -16,6 +16,8 @@ namespace internal {
// ES6 section 19.1 Object Objects
typedef compiler::Node Node;
+template <class T>
+using TNode = CodeStubAssembler::TNode<T>;
class ObjectBuiltinsAssembler : public CodeStubAssembler {
public:
@@ -34,6 +36,46 @@ class ObjectBuiltinsAssembler : public CodeStubAssembler {
Node* ConstructDataDescriptor(Node* context, Node* value, Node* writable,
Node* enumerable, Node* configurable);
Node* GetAccessorOrUndefined(Node* accessor, Label* if_bailout);
+
+ Node* IsSpecialReceiverMap(SloppyTNode<Map> map);
+};
+
+class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler {
+ public:
+ explicit ObjectEntriesValuesBuiltinsAssembler(
+ compiler::CodeAssemblerState* state)
+ : ObjectBuiltinsAssembler(state) {}
+
+ protected:
+ enum CollectType { kEntries, kValues };
+
+ TNode<Word32T> IsStringWrapperElementsKind(TNode<Map> map);
+
+ TNode<BoolT> IsPropertyEnumerable(TNode<Uint32T> details);
+
+ TNode<BoolT> IsPropertyKindAccessor(TNode<Uint32T> kind);
+
+ TNode<BoolT> IsPropertyKindData(TNode<Uint32T> kind);
+
+ TNode<Uint32T> HasHiddenPrototype(TNode<Map> map);
+
+ TNode<Uint32T> LoadPropertyKind(TNode<Uint32T> details) {
+ return DecodeWord32<PropertyDetails::KindField>(details);
+ }
+
+ void GetOwnValuesOrEntries(TNode<Context> context, TNode<Object> maybe_object,
+ CollectType collect_type);
+
+ void GotoIfMapHasSlowProperties(TNode<Map> map, Label* if_slow);
+
+ TNode<JSArray> FastGetOwnValuesOrEntries(
+ TNode<Context> context, TNode<JSObject> object,
+ Label* if_call_runtime_with_fast_path, Label* if_no_properties,
+ CollectType collect_type);
+
+ TNode<JSArray> FinalizeValuesOrEntriesJSArray(
+ TNode<Context> context, TNode<FixedArray> values_or_entries,
+ TNode<IntPtrT> size, TNode<Map> array_map, Label* if_empty);
};
void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context,
@@ -97,6 +139,265 @@ Node* ObjectBuiltinsAssembler::ConstructDataDescriptor(Node* context,
return js_desc;
}
+Node* ObjectBuiltinsAssembler::IsSpecialReceiverMap(SloppyTNode<Map> map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
+ Node* is_special = IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
+ uint32_t mask =
+ Map::HasNamedInterceptorBit::kMask | Map::IsAccessCheckNeededBit::kMask;
+ USE(mask);
+ // Interceptors or access checks imply special receiver.
+ CSA_ASSERT(this,
+ SelectConstant(IsSetWord32(LoadMapBitField(map), mask), is_special,
+ Int32Constant(1), MachineRepresentation::kWord32));
+ return is_special;
+}
+
+TNode<Word32T>
+ObjectEntriesValuesBuiltinsAssembler::IsStringWrapperElementsKind(
+ TNode<Map> map) {
+ Node* kind = LoadMapElementsKind(map);
+ return Word32Or(
+ Word32Equal(kind, Int32Constant(FAST_STRING_WRAPPER_ELEMENTS)),
+ Word32Equal(kind, Int32Constant(SLOW_STRING_WRAPPER_ELEMENTS)));
+}
+
+TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyEnumerable(
+ TNode<Uint32T> details) {
+ TNode<Uint32T> attributes =
+ DecodeWord32<PropertyDetails::AttributesField>(details);
+ return IsNotSetWord32(attributes, PropertyAttributes::DONT_ENUM);
+}
+
+TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindAccessor(
+ TNode<Uint32T> kind) {
+ return Word32Equal(kind, Int32Constant(PropertyKind::kAccessor));
+}
+
+TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindData(
+ TNode<Uint32T> kind) {
+ return Word32Equal(kind, Int32Constant(PropertyKind::kData));
+}
+
+TNode<Uint32T> ObjectEntriesValuesBuiltinsAssembler::HasHiddenPrototype(
+ TNode<Map> map) {
+ TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
+ return DecodeWord32<Map::HasHiddenPrototypeBit>(bit_field3);
+}
+
+void ObjectEntriesValuesBuiltinsAssembler::GetOwnValuesOrEntries(
+ TNode<Context> context, TNode<Object> maybe_object,
+ CollectType collect_type) {
+ TNode<JSObject> object = TNode<JSObject>::UncheckedCast(
+ CallBuiltin(Builtins::kToObject, context, maybe_object));
+
+ Label if_call_runtime_with_fast_path(this, Label::kDeferred),
+ if_call_runtime(this, Label::kDeferred),
+ if_no_properties(this, Label::kDeferred);
+
+ TNode<Map> map = LoadMap(object);
+ GotoIfNot(IsJSObjectMap(map), &if_call_runtime);
+ GotoIfMapHasSlowProperties(map, &if_call_runtime);
+
+ TNode<FixedArrayBase> elements = LoadElements(object);
+  // If the object has elements, we treat it as the slow case
+  // and call into the runtime.
+ GotoIfNot(IsEmptyFixedArray(elements), &if_call_runtime_with_fast_path);
+
+ TNode<JSArray> result = FastGetOwnValuesOrEntries(
+ context, object, &if_call_runtime_with_fast_path, &if_no_properties,
+ collect_type);
+ Return(result);
+
+ BIND(&if_no_properties);
+ {
+ Node* native_context = LoadNativeContext(context);
+ Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ Node* empty_array = AllocateJSArray(PACKED_ELEMENTS, array_map,
+ IntPtrConstant(0), SmiConstant(0));
+ Return(empty_array);
+ }
+
+ BIND(&if_call_runtime_with_fast_path);
+ {
+    // In the slow case, we simply call into the runtime.
+ if (collect_type == CollectType::kEntries) {
+ Return(CallRuntime(Runtime::kObjectEntries, context, object));
+ } else {
+ DCHECK(collect_type == CollectType::kValues);
+ Return(CallRuntime(Runtime::kObjectValues, context, object));
+ }
+ }
+
+ BIND(&if_call_runtime);
+ {
+    // In the slow case, we simply call into the runtime.
+ if (collect_type == CollectType::kEntries) {
+ Return(CallRuntime(Runtime::kObjectEntriesSkipFastPath, context, object));
+ } else {
+ DCHECK(collect_type == CollectType::kValues);
+ Return(CallRuntime(Runtime::kObjectValuesSkipFastPath, context, object));
+ }
+ }
+}
+
+void ObjectEntriesValuesBuiltinsAssembler::GotoIfMapHasSlowProperties(
+ TNode<Map> map, Label* if_slow) {
+ GotoIf(IsStringWrapperElementsKind(map), if_slow);
+ GotoIf(IsSpecialReceiverMap(map), if_slow);
+ GotoIf(HasHiddenPrototype(map), if_slow);
+ GotoIf(IsDictionaryMap(map), if_slow);
+}
+
+TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
+ TNode<Context> context, TNode<JSObject> object,
+ Label* if_call_runtime_with_fast_path, Label* if_no_properties,
+ CollectType collect_type) {
+ Node* native_context = LoadNativeContext(context);
+ TNode<Map> array_map =
+ LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ TNode<Map> map = LoadMap(object);
+ TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
+
+ Label if_has_enum_cache(this), if_not_has_enum_cache(this),
+ collect_entries(this);
+ Node* object_enum_length =
+ DecodeWordFromWord32<Map::EnumLengthBits>(bit_field3);
+ Node* has_enum_cache = WordNotEqual(
+ object_enum_length, IntPtrConstant(kInvalidEnumCacheSentinel));
+
+  // If we find an enum cache on the object, we use its length as the
+  // result array length, because it matches the number of elements in the
+  // Object.(entries/values) result array.
+  // object_enum_length is therefore a tighter bound than the
+  // NumberOfOwnDescriptorsBits value.
+  // If no enum cache is found, we call into the runtime instead, which
+  // also initializes the enum cache so that subsequent calls can take the
+  // CSA fast path.
+ Branch(has_enum_cache, &if_has_enum_cache, if_call_runtime_with_fast_path);
+
+ BIND(&if_has_enum_cache);
+ {
+ GotoIf(WordEqual(object_enum_length, IntPtrConstant(0)), if_no_properties);
+ TNode<FixedArray> values_or_entries = TNode<FixedArray>::UncheckedCast(
+ AllocateFixedArray(PACKED_ELEMENTS, object_enum_length,
+ INTPTR_PARAMETERS, kAllowLargeObjectAllocation));
+
+    // Even when an enum cache exists, we cannot detect accessor properties
+    // until we loop through the descriptors.
+    // If the object turns out to have an accessor, we bail out to the
+    // runtime, which would leave uninitialized slots in the FixedArray.
+    // Therefore the array is filled with the-hole up front, even though an
+    // enum cache exists.
+ FillFixedArrayWithValue(PACKED_ELEMENTS, values_or_entries,
+ IntPtrConstant(0), object_enum_length,
+ Heap::kTheHoleValueRootIndex);
+
+ TVARIABLE(IntPtrT, var_result_index, IntPtrConstant(0));
+ TVARIABLE(IntPtrT, var_descriptor_index, IntPtrConstant(0));
+ Variable* vars[] = {&var_descriptor_index, &var_result_index};
+ // Let desc be ? O.[[GetOwnProperty]](key).
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(map);
+ Label loop(this, 2, vars), after_loop(this), loop_condition(this);
+ Branch(IntPtrEqual(var_descriptor_index, object_enum_length), &after_loop,
+ &loop);
+
+    // We do not use BuildFastLoop here.
+    // Instead, we use a hand-written loop,
+    // because we need 'continue' semantics inside the body.
+ BIND(&loop);
+ {
+      // Getters are not invoked on this path,
+      // so the map cannot change under us.
+ CSA_ASSERT(this, WordEqual(map, LoadMap(object)));
+ TNode<Uint32T> descriptor_index = TNode<Uint32T>::UncheckedCast(
+ TruncateWordToWord32(var_descriptor_index));
+ Node* next_key = DescriptorArrayGetKey(descriptors, descriptor_index);
+
+ // Skip Symbols.
+ GotoIf(IsSymbol(next_key), &loop_condition);
+
+ TNode<Uint32T> details = TNode<Uint32T>::UncheckedCast(
+ DescriptorArrayGetDetails(descriptors, descriptor_index));
+ TNode<Uint32T> kind = LoadPropertyKind(details);
+
+      // If the property is an accessor, we leave the fast path and call the runtime.
+ GotoIf(IsPropertyKindAccessor(kind), if_call_runtime_with_fast_path);
+ CSA_ASSERT(this, IsPropertyKindData(kind));
+
+ // If desc is not undefined and desc.[[Enumerable]] is true, then
+ GotoIfNot(IsPropertyEnumerable(details), &loop_condition);
+
+ VARIABLE(var_property_value, MachineRepresentation::kTagged,
+ UndefinedConstant());
+ Node* descriptor_name_index = DescriptorNumberToIndex(descriptor_index);
+
+ // Let value be ? Get(O, key).
+ LoadPropertyFromFastObject(object, map, descriptors,
+ descriptor_name_index, details,
+ &var_property_value);
+
+ // If kind is "value", append value to properties.
+ Node* value = var_property_value.value();
+
+ if (collect_type == CollectType::kEntries) {
+ // Let entry be CreateArrayFromList(« key, value »).
+ Node* array = nullptr;
+ Node* elements = nullptr;
+ std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
+ PACKED_ELEMENTS, array_map, SmiConstant(2), nullptr,
+ IntPtrConstant(2));
+ StoreFixedArrayElement(elements, 0, next_key, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(elements, 1, value, SKIP_WRITE_BARRIER);
+ value = array;
+ }
+
+ StoreFixedArrayElement(values_or_entries, var_result_index, value);
+ Increment(&var_result_index, 1);
+ Goto(&loop_condition);
+
+ BIND(&loop_condition);
+ {
+ Increment(&var_descriptor_index, 1);
+ Branch(IntPtrEqual(var_descriptor_index, object_enum_length),
+ &after_loop, &loop);
+ }
+ }
+ BIND(&after_loop);
+ return FinalizeValuesOrEntriesJSArray(context, values_or_entries,
+ var_result_index, array_map,
+ if_no_properties);
+ }
+}
+
+TNode<JSArray>
+ObjectEntriesValuesBuiltinsAssembler::FinalizeValuesOrEntriesJSArray(
+ TNode<Context> context, TNode<FixedArray> result, TNode<IntPtrT> size,
+ TNode<Map> array_map, Label* if_empty) {
+ CSA_ASSERT(this, IsJSArrayMap(array_map));
+
+ GotoIf(IntPtrEqual(size, IntPtrConstant(0)), if_empty);
+ Node* array = AllocateUninitializedJSArrayWithoutElements(
+ array_map, SmiTag(size), nullptr);
+ StoreObjectField(array, JSArray::kElementsOffset, result);
+ return TNode<JSArray>::UncheckedCast(array);
+}
+
+TF_BUILTIN(ObjectPrototypeToLocaleString, CodeStubAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+
+ Label if_null_or_undefined(this, Label::kDeferred);
+ GotoIf(IsNullOrUndefined(receiver), &if_null_or_undefined);
+
+ TNode<Object> method =
+ CAST(GetProperty(context, receiver, factory()->toString_string()));
+ Return(CallJS(CodeFactory::Call(isolate()), context, method, receiver));
+
+ BIND(&if_null_or_undefined);
+ ThrowTypeError(context, MessageTemplate::kCalledOnNullOrUndefined,
+ "Object.prototype.toLocaleString");
+}
+
TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
Node* object = Parameter(Descriptor::kReceiver);
Node* key = Parameter(Descriptor::kKey);
@@ -250,6 +551,22 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
}
}
+TF_BUILTIN(ObjectValues, ObjectEntriesValuesBuiltinsAssembler) {
+ TNode<JSObject> object =
+ TNode<JSObject>::UncheckedCast(Parameter(Descriptor::kObject));
+ TNode<Context> context =
+ TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext));
+ GetOwnValuesOrEntries(context, object, CollectType::kValues);
+}
+
+TF_BUILTIN(ObjectEntries, ObjectEntriesValuesBuiltinsAssembler) {
+ TNode<JSObject> object =
+ TNode<JSObject>::UncheckedCast(Parameter(Descriptor::kObject));
+ TNode<Context> context =
+ TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext));
+ GetOwnValuesOrEntries(context, object, CollectType::kEntries);
+}
+
// ES #sec-object.prototype.isprototypeof
TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -550,7 +867,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
GotoIf(IsNull(holder), &return_default);
Node* holder_map = LoadMap(holder);
Node* holder_bit_field3 = LoadMapBitField3(holder_map);
- GotoIf(IsSetWord32<Map::MayHaveInterestingSymbols>(holder_bit_field3),
+ GotoIf(IsSetWord32<Map::MayHaveInterestingSymbolsBit>(holder_bit_field3),
&return_generic);
var_holder.Bind(LoadMapPrototype(holder_map));
Goto(&loop);
@@ -615,7 +932,7 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
&call_runtime);
// Handle dictionary objects or fast objects with properties in runtime.
Node* bit_field3 = LoadMapBitField3(properties_map);
- GotoIf(IsSetWord32<Map::DictionaryMap>(bit_field3), &call_runtime);
+ GotoIf(IsSetWord32<Map::IsDictionaryMapBit>(bit_field3), &call_runtime);
Branch(IsSetWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3),
&call_runtime, &no_properties);
}
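
The fast path added above walks the map's descriptor array up to the enum-cache length, skips symbol keys, bails out to the runtime on accessors, and collects enumerable data properties as bare values or « key, value » pairs. A rough standalone model of that filtering loop in plain C++ follows (the descriptor layout and field names are invented for illustration; the real code operates on V8's DescriptorArray):

#include <string>
#include <utility>
#include <vector>

enum class Kind { kData, kAccessor };

// Illustrative descriptor entry; not V8's actual layout.
struct Descriptor {
  std::string key;
  bool is_symbol;
  bool enumerable;
  Kind kind;
  std::string value;
};

enum class CollectType { kEntries, kValues };

// Returns false when an accessor is hit, signalling "bail to the runtime".
bool FastGetOwnValuesOrEntries(
    const std::vector<Descriptor>& descriptors, CollectType collect_type,
    std::vector<std::pair<std::string, std::string>>* out) {
  for (const Descriptor& desc : descriptors) {
    if (desc.is_symbol) continue;                     // skip symbol keys
    if (desc.kind == Kind::kAccessor) return false;   // accessors: slow path
    if (!desc.enumerable) continue;                   // DONT_ENUM filtered out
    if (collect_type == CollectType::kEntries) {
      out->emplace_back(desc.key, desc.value);        // « key, value » pair
    } else {
      out->emplace_back(std::string(), desc.value);   // values only
    }
  }
  return true;
}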
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 36f7ebfc0a..4e353b9260 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -395,31 +395,6 @@ BUILTIN(ObjectIsSealed) {
return isolate->heap()->ToBoolean(result.FromJust());
}
-BUILTIN(ObjectValues) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
- Handle<FixedArray> values;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, values, JSReceiver::GetOwnValues(receiver, ENUMERABLE_STRINGS));
- return *isolate->factory()->NewJSArrayWithElements(values);
-}
-
-BUILTIN(ObjectEntries) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
- Handle<FixedArray> entries;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, entries,
- JSReceiver::GetOwnEntries(receiver, ENUMERABLE_STRINGS));
- return *isolate->factory()->NewJSArrayWithElements(entries);
-}
-
BUILTIN(ObjectGetOwnPropertyDescriptors) {
HandleScope scope(isolate);
Handle<Object> object = args.atOrUndefined(isolate, 1);
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index 67ebc85ba4..1a3ebcd892 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -161,12 +161,12 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
Node* resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
GotoIf(TaggedIsSmi(resolve), &if_notcallable);
- GotoIfNot(IsCallableMap(LoadMap(resolve)), &if_notcallable);
+ GotoIfNot(IsCallable(resolve), &if_notcallable);
Node* reject =
LoadObjectField(capability, PromiseCapability::kRejectOffset);
GotoIf(TaggedIsSmi(reject), &if_notcallable);
- GotoIfNot(IsCallableMap(LoadMap(reject)), &if_notcallable);
+ GotoIfNot(IsCallable(reject), &if_notcallable);
StoreObjectField(capability, PromiseCapability::kPromiseOffset, promise);
@@ -189,25 +189,6 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
return var_result.value();
}
-void PromiseBuiltinsAssembler::InitializeFunctionContext(Node* native_context,
- Node* context,
- int slots) {
- DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS);
- StoreMapNoWriteBarrier(context, Heap::kFunctionContextMapRootIndex);
- StoreObjectFieldNoWriteBarrier(context, FixedArray::kLengthOffset,
- SmiConstant(slots));
-
- Node* const empty_fn =
- LoadContextElement(native_context, Context::CLOSURE_INDEX);
- StoreContextElementNoWriteBarrier(context, Context::CLOSURE_INDEX, empty_fn);
- StoreContextElementNoWriteBarrier(context, Context::PREVIOUS_INDEX,
- UndefinedConstant());
- StoreContextElementNoWriteBarrier(context, Context::EXTENSION_INDEX,
- TheHoleConstant());
- StoreContextElementNoWriteBarrier(context, Context::NATIVE_CONTEXT_INDEX,
- native_context);
-}
-
Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context,
int slots) {
DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS);
@@ -366,8 +347,6 @@ Node* PromiseBuiltinsAssembler::InternalPromiseThen(Node* context,
VARIABLE(var_deferred_on_resolve, MachineRepresentation::kTagged);
VARIABLE(var_deferred_on_reject, MachineRepresentation::kTagged);
- GotoIfForceSlowPath(&promise_capability);
-
Branch(WordEqual(promise_fun, constructor), &fast_promise_capability,
&promise_capability);
@@ -415,16 +394,11 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
append_callbacks(this);
GotoIf(TaggedIsSmi(on_resolve), &if_onresolvenotcallable);
- Isolate* isolate = this->isolate();
- Node* const on_resolve_map = LoadMap(on_resolve);
- Branch(IsCallableMap(on_resolve_map), &onrejectcheck,
- &if_onresolvenotcallable);
+ Branch(IsCallable(on_resolve), &onrejectcheck, &if_onresolvenotcallable);
BIND(&if_onresolvenotcallable);
{
- Node* const default_resolve_handler_symbol = HeapConstant(
- isolate->factory()->promise_default_resolve_handler_symbol());
- var_on_resolve.Bind(default_resolve_handler_symbol);
+ var_on_resolve.Bind(PromiseDefaultResolveHandlerSymbolConstant());
Goto(&onrejectcheck);
}
@@ -433,15 +407,11 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
Label if_onrejectnotcallable(this);
GotoIf(TaggedIsSmi(on_reject), &if_onrejectnotcallable);
- Node* const on_reject_map = LoadMap(on_reject);
- Branch(IsCallableMap(on_reject_map), &append_callbacks,
- &if_onrejectnotcallable);
+ Branch(IsCallable(on_reject), &append_callbacks, &if_onrejectnotcallable);
BIND(&if_onrejectnotcallable);
{
- Node* const default_reject_handler_symbol = HeapConstant(
- isolate->factory()->promise_default_reject_handler_symbol());
- var_on_reject.Bind(default_reject_handler_symbol);
+ var_on_reject.Bind(PromiseDefaultRejectHandlerSymbolConstant());
Goto(&append_callbacks);
}
}
@@ -558,8 +528,7 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
Node* info = AllocatePromiseReactionJobInfo(
result, var_on_resolve.value(), deferred_promise, deferred_on_resolve,
deferred_on_reject, context);
- // TODO(gsathya): Move this to TF
- CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, info);
+ CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
Goto(&out);
BIND(&reject);
@@ -578,8 +547,7 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
Node* info = AllocatePromiseReactionJobInfo(
result, var_on_reject.value(), deferred_promise,
deferred_on_resolve, deferred_on_reject, context);
- // TODO(gsathya): Move this to TF
- CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, info);
+ CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
Goto(&out);
}
}
@@ -787,8 +755,7 @@ void PromiseBuiltinsAssembler::InternalResolvePromise(Node* context,
// 12. Perform EnqueueJob("PromiseJobs",
// PromiseResolveThenableJob, « promise, resolution, thenAction»).
BIND(&enqueue);
- // TODO(gsathya): Move this to TF
- CallRuntime(Runtime::kEnqueuePromiseResolveThenableJob, context, info);
+ CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
Goto(&out);
}
@@ -846,7 +813,7 @@ void PromiseBuiltinsAssembler::PromiseFulfill(
result, tasks, deferred_promise, deferred_on_resolve, deferred_on_reject,
context);
- CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, info);
+ CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
Goto(&do_promisereset);
BIND(&do_promisereset);
@@ -1080,19 +1047,18 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
Node *resolve, *reject;
std::tie(resolve, reject) = CreatePromiseResolvingFunctions(
var_result.value(), TrueConstant(), native_context);
- Callable call_callable = CodeFactory::Call(isolate);
- Node* const maybe_exception = CallJS(call_callable, context, executor,
- UndefinedConstant(), resolve, reject);
+ Node* const maybe_exception = CallJS(
+ CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
+ context, executor, UndefinedConstant(), resolve, reject);
GotoIfException(maybe_exception, &if_rejectpromise, &var_reason);
Branch(is_debug_active, &debug_pop, &out);
BIND(&if_rejectpromise);
{
- Callable call_callable = CodeFactory::Call(isolate);
- CallJS(call_callable, context, reject, UndefinedConstant(),
- var_reason.value());
+ CallJS(CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
+ context, reject, UndefinedConstant(), var_reason.value());
Branch(is_debug_active, &debug_pop, &out);
}
@@ -1130,8 +1096,8 @@ TF_BUILTIN(PromiseInternalConstructor, PromiseBuiltinsAssembler) {
}
// ES#sec-promise.prototype.then
-// Promise.prototype.catch ( onFulfilled, onRejected )
-TF_BUILTIN(PromiseThen, PromiseBuiltinsAssembler) {
+// Promise.prototype.then ( onFulfilled, onRejected )
+TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) {
// 1. Let promise be the this value.
Node* const promise = Parameter(Descriptor::kReceiver);
Node* const on_resolve = Parameter(Descriptor::kOnFullfilled);
@@ -1169,7 +1135,6 @@ TF_BUILTIN(PromiseHandleReject, PromiseBuiltinsAssembler) {
Node* const exception = Parameter(Descriptor::kException);
Node* const context = Parameter(Descriptor::kContext);
- Callable call_callable = CodeFactory::Call(isolate());
VARIABLE(var_unused, MachineRepresentation::kTagged);
Label if_internalhandler(this), if_customhandler(this, Label::kDeferred);
@@ -1183,7 +1148,15 @@ TF_BUILTIN(PromiseHandleReject, PromiseBuiltinsAssembler) {
BIND(&if_customhandler);
{
- CallJS(call_callable, context, on_reject, UndefinedConstant(), exception);
+ VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
+ Label if_exception(this);
+ Node* const ret = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, on_reject, UndefinedConstant(), exception);
+ GotoIfException(ret, &if_exception, &var_exception);
+ Return(UndefinedConstant());
+ BIND(&if_exception);
+ CallRuntime(Runtime::kReportMessage, context, var_exception.value());
Return(UndefinedConstant());
}
}
@@ -1225,9 +1198,7 @@ TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) {
BIND(&if_defaulthandler);
{
Label if_resolve(this), if_reject(this);
- Node* const default_resolve_handler_symbol = HeapConstant(
- isolate->factory()->promise_default_resolve_handler_symbol());
- Branch(WordEqual(default_resolve_handler_symbol, handler), &if_resolve,
+ Branch(IsPromiseDefaultResolveHandlerSymbol(handler), &if_resolve,
&if_reject);
BIND(&if_resolve);
@@ -1246,9 +1217,9 @@ TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) {
BIND(&if_callablehandler);
{
- Callable call_callable = CodeFactory::Call(isolate);
- Node* const result =
- CallJS(call_callable, context, handler, UndefinedConstant(), value);
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
+ context, handler, UndefinedConstant(), value);
var_result.Bind(result);
GotoIfException(result, &if_rejectpromise, &var_reason);
Branch(IsUndefined(deferred_on_resolve), &if_internalhandler,
@@ -1261,10 +1232,10 @@ TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) {
BIND(&if_customhandler);
{
- Callable call_callable = CodeFactory::Call(isolate);
- Node* const maybe_exception =
- CallJS(call_callable, context, deferred_on_resolve,
- UndefinedConstant(), var_result.value());
+ Node* const maybe_exception = CallJS(
+ CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
+ context, deferred_on_resolve, UndefinedConstant(),
+ var_result.value());
GotoIfException(maybe_exception, &if_rejectpromise, &var_reason);
Goto(&promisehook_after);
}
@@ -1297,9 +1268,23 @@ TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) {
}
}
+TF_BUILTIN(PromiseHandleJS, PromiseBuiltinsAssembler) {
+ Node* const value = Parameter(Descriptor::kValue);
+ Node* const handler = Parameter(Descriptor::kHandler);
+ Node* const deferred_promise = Parameter(Descriptor::kDeferredPromise);
+ Node* const deferred_on_resolve = Parameter(Descriptor::kDeferredOnResolve);
+ Node* const deferred_on_reject = Parameter(Descriptor::kDeferredOnReject);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ Node* const result =
+ CallBuiltin(Builtins::kPromiseHandle, context, value, handler,
+ deferred_promise, deferred_on_resolve, deferred_on_reject);
+ Return(result);
+}
+
// ES#sec-promise.prototype.catch
// Promise.prototype.catch ( onRejected )
-TF_BUILTIN(PromiseCatch, PromiseBuiltinsAssembler) {
+TF_BUILTIN(PromisePrototypeCatch, PromiseBuiltinsAssembler) {
// 1. Let promise be the this value.
Node* const promise = Parameter(Descriptor::kReceiver);
Node* const on_resolve = UndefinedConstant();
@@ -1321,9 +1306,9 @@ TF_BUILTIN(PromiseCatch, PromiseBuiltinsAssembler) {
{
Node* const then =
GetProperty(context, promise, isolate()->factory()->then_string());
- Callable call_callable = CodeFactory::Call(isolate());
- Node* const result =
- CallJS(call_callable, context, then, promise, on_resolve, on_reject);
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ context, then, promise, on_resolve, on_reject);
Return(result);
}
}
@@ -1407,10 +1392,10 @@ TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
{
Node* const capability = NewPromiseCapability(context, constructor);
- Callable call_callable = CodeFactory::Call(isolate);
Node* const resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
- CallJS(call_callable, context, resolve, UndefinedConstant(), value);
+ CallJS(CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
+ context, resolve, UndefinedConstant(), value);
Node* const result =
LoadObjectField(capability, PromiseCapability::kPromiseOffset);
@@ -1468,8 +1453,6 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
Label if_nativepromise(this), if_custompromise(this, Label::kDeferred);
Node* const native_context = LoadNativeContext(context);
- GotoIfForceSlowPath(&if_custompromise);
-
Node* const promise_fun =
LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
Branch(WordEqual(promise_fun, receiver), &if_nativepromise,
@@ -1492,8 +1475,8 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
// 4. Perform ? Call(promiseCapability.[[Reject]], undefined, « r »).
Node* const reject =
LoadObjectField(capability, PromiseCapability::kRejectOffset);
- Callable call_callable = CodeFactory::Call(isolate());
- CallJS(call_callable, context, reject, UndefinedConstant(), reason);
+ CallJS(CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, reject, UndefinedConstant(), reason);
// 5. Return promiseCapability.[[Promise]].
Node* const promise =
@@ -1567,9 +1550,9 @@ TF_BUILTIN(PromiseThenFinally, PromiseBuiltinsAssembler) {
CSA_ASSERT(this, IsCallable(on_finally));
// 3. Let result be ? Call(onFinally).
- Callable call_callable = CodeFactory::Call(isolate());
- Node* const result =
- CallJS(call_callable, context, on_finally, UndefinedConstant());
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, on_finally, UndefinedConstant());
// 4. Let C be F.[[Constructor]].
Node* const constructor = LoadContextElement(context, kConstructorSlot);
@@ -1588,8 +1571,9 @@ TF_BUILTIN(PromiseThenFinally, PromiseBuiltinsAssembler) {
// 8. Return ? Invoke(promise, "then", « valueThunk »).
Node* const promise_then =
GetProperty(context, promise, factory()->then_string());
- Node* const result_promise = CallJS(call_callable, context,
- promise_then, promise, value_thunk);
+ Node* const result_promise = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ context, promise_then, promise, value_thunk);
Return(result_promise);
}
@@ -1628,9 +1612,9 @@ TF_BUILTIN(PromiseCatchFinally, PromiseBuiltinsAssembler) {
CSA_ASSERT(this, IsCallable(on_finally));
// 3. Let result be ? Call(onFinally).
- Callable call_callable = CodeFactory::Call(isolate());
- Node* result =
- CallJS(call_callable, context, on_finally, UndefinedConstant());
+ Node* result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, on_finally, UndefinedConstant());
// 4. Let C be F.[[Constructor]].
Node* const constructor = LoadContextElement(context, kConstructorSlot);
@@ -1649,12 +1633,13 @@ TF_BUILTIN(PromiseCatchFinally, PromiseBuiltinsAssembler) {
// 8. Return ? Invoke(promise, "then", « thrower »).
Node* const promise_then =
GetProperty(context, promise, factory()->then_string());
- Node* const result_promise = CallJS(call_callable, context,
- promise_then, promise, thrower);
+ Node* const result_promise = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ context, promise_then, promise, thrower);
Return(result_promise);
}
-TF_BUILTIN(PromiseFinally, PromiseBuiltinsAssembler) {
+TF_BUILTIN(PromisePrototypeFinally, PromiseBuiltinsAssembler) {
CSA_ASSERT_JS_ARGC_EQ(this, 1);
// 1. Let promise be the this value.
@@ -1662,9 +1647,9 @@ TF_BUILTIN(PromiseFinally, PromiseBuiltinsAssembler) {
Node* const on_finally = Parameter(Descriptor::kOnFinally);
Node* const context = Parameter(Descriptor::kContext);
- // 2. If IsPromise(promise) is false, throw a TypeError exception.
- ThrowIfNotInstanceType(context, promise, JS_PROMISE_TYPE,
- "Promise.prototype.finally");
+ // 2. If Type(promise) is not Object, throw a TypeError exception.
+ ThrowIfNotJSReceiver(context, promise, MessageTemplate::kCalledOnNonObject,
+ "Promise.prototype.finally");
// 3. Let C be ? SpeciesConstructor(promise, %Promise%).
Node* const native_context = LoadNativeContext(context);
@@ -1714,9 +1699,10 @@ TF_BUILTIN(PromiseFinally, PromiseBuiltinsAssembler) {
BIND(&perform_finally);
Node* const promise_then =
GetProperty(context, promise, factory()->then_string());
- Node* const result_promise =
- CallJS(CodeFactory::Call(isolate()), context, promise_then, promise,
- var_then_finally.value(), var_catch_finally.value());
+ Node* const result_promise = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ context, promise_then, promise, var_then_finally.value(),
+ var_catch_finally.value());
Return(result_promise);
}
@@ -1758,8 +1744,9 @@ TF_BUILTIN(PerformNativePromiseThen, PromiseBuiltinsAssembler) {
}
Node* PromiseBuiltinsAssembler::PerformPromiseAll(
- Node* context, Node* constructor, Node* capability, Node* iterator,
- Label* if_exception, Variable* var_exception) {
+ Node* context, Node* constructor, Node* capability,
+ const IteratorRecord& iterator, Label* if_exception,
+ Variable* var_exception) {
IteratorBuiltinsAssembler iter_assembler(state());
Label close_iterator(this);
@@ -1805,8 +1792,9 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
GetProperty(context, constructor, factory()->resolve_string());
GotoIfException(promise_resolve, &close_iterator, var_exception);
- Node* const next_promise = CallJS(CodeFactory::Call(isolate()), context,
- promise_resolve, constructor, next_value);
+ Node* const next_promise = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ context, promise_resolve, constructor, next_value);
GotoIfException(next_promise, &close_iterator, var_exception);
// Let resolveElement be a new built-in function object as defined in
@@ -1844,7 +1832,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
BIND(&if_outofrange);
{
// If the incremented value is out of Smi range, crash.
- Abort(kOffsetOutOfRange);
+ Abort(AbortReason::kOffsetOutOfRange);
}
BIND(&done);
@@ -1857,7 +1845,8 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
GotoIfException(then, &close_iterator, var_exception);
Node* const then_call = CallJS(
- CodeFactory::Call(isolate()), context, then, next_promise, resolve,
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ context, then, next_promise, resolve,
LoadObjectField(capability, PromiseCapability::kRejectOffset));
GotoIfException(then_call, &close_iterator, var_exception);
@@ -1899,9 +1888,9 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
Node* const resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
- Node* const resolve_call =
- CallJS(CodeFactory::Call(isolate()), context, resolve,
- UndefinedConstant(), values_array);
+ Node* const resolve_call = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, resolve, UndefinedConstant(), values_array);
GotoIfException(resolve_call, if_exception, var_exception);
Goto(&return_promise);
@@ -1963,7 +1952,7 @@ TF_BUILTIN(PromiseAll, PromiseBuiltinsAssembler) {
// Let iterator be GetIterator(iterable).
// IfAbruptRejectPromise(iterator, promiseCapability).
Node* const iterable = Parameter(Descriptor::kIterable);
- Node* const iterator = iter_assembler.GetIterator(
+ IteratorRecord iterator = iter_assembler.GetIterator(
context, iterable, &reject_promise, &var_exception);
// Let result be PerformPromiseAll(iteratorRecord, C, promiseCapability).
@@ -1982,9 +1971,8 @@ TF_BUILTIN(PromiseAll, PromiseBuiltinsAssembler) {
CSA_SLOW_ASSERT(this, IsNotTheHole(var_exception.value()));
Node* const reject =
LoadObjectField(capability, PromiseCapability::kRejectOffset);
- Callable callable = CodeFactory::Call(isolate());
- CallJS(callable, context, reject, UndefinedConstant(),
- var_exception.value());
+ CallJS(CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, reject, UndefinedConstant(), var_exception.value());
Node* const promise =
LoadObjectField(capability, PromiseCapability::kPromiseOffset);
@@ -2059,8 +2047,8 @@ TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
LoadContextElement(context, kPromiseAllResolveElementCapabilitySlot);
Node* const resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
- CallJS(CodeFactory::Call(isolate()), context, resolve, UndefinedConstant(),
- values_array);
+ CallJS(CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, resolve, UndefinedConstant(), values_array);
Return(UndefinedConstant());
BIND(&already_called);
@@ -2101,7 +2089,7 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
// Let iterator be GetIterator(iterable).
// IfAbruptRejectPromise(iterator, promiseCapability).
Node* const iterable = Parameter(Descriptor::kIterable);
- Node* const iterator = iter_assembler.GetIterator(
+ IteratorRecord iterator = iter_assembler.GetIterator(
context, iterable, &reject_promise, &var_exception);
// Let result be PerformPromiseRace(iteratorRecord, C, promiseCapability).
@@ -2134,8 +2122,10 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
GetProperty(context, receiver, factory()->resolve_string());
GotoIfException(promise_resolve, &close_iterator, &var_exception);
- Node* const next_promise = CallJS(CodeFactory::Call(isolate()), context,
- promise_resolve, receiver, next_value);
+ Node* const next_promise =
+ CallJS(CodeFactory::Call(isolate(),
+ ConvertReceiverMode::kNotNullOrUndefined),
+ context, promise_resolve, receiver, next_value);
GotoIfException(next_promise, &close_iterator, &var_exception);
// Perform ? Invoke(nextPromise, "then", « resolveElement,
@@ -2144,8 +2134,10 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
GetProperty(context, next_promise, factory()->then_string());
GotoIfException(then, &close_iterator, &var_exception);
- Node* const then_call = CallJS(CodeFactory::Call(isolate()), context,
- then, next_promise, resolve, reject);
+ Node* const then_call =
+ CallJS(CodeFactory::Call(isolate(),
+ ConvertReceiverMode::kNotNullOrUndefined),
+ context, then, next_promise, resolve, reject);
GotoIfException(then_call, &close_iterator, &var_exception);
// For catch prediction, mark that rejections here are semantically
@@ -2172,9 +2164,8 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
{
Node* const reject =
LoadObjectField(capability, PromiseCapability::kRejectOffset);
- Callable callable = CodeFactory::Call(isolate());
- CallJS(callable, context, reject, UndefinedConstant(),
- var_exception.value());
+ CallJS(CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, reject, UndefinedConstant(), var_exception.value());
Node* const promise =
LoadObjectField(capability, PromiseCapability::kPromiseOffset);
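
Many of the call sites above now pass an explicit ConvertReceiverMode so the call stub can skip the receiver-conversion check when the receiver is statically known to be undefined or known to be a valid object. Below is a simplified standalone sketch of that idea; the enum mirrors V8's hint values, but the dispatch itself is purely illustrative:

#include <cassert>
#include <iostream>

// Mirrors V8's ConvertReceiverMode hint: what the caller already knows
// about the receiver it is passing.
enum class ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };

struct Receiver {
  bool is_null_or_undefined;
};

// Illustrative call helper: with a precise hint the generic null/undefined
// check can be skipped entirely.
void CallWithHint(ConvertReceiverMode mode, const Receiver& receiver) {
  switch (mode) {
    case ConvertReceiverMode::kNullOrUndefined:
      assert(receiver.is_null_or_undefined);
      std::cout << "receiver known to be null/undefined; skip the check\n";
      break;
    case ConvertReceiverMode::kNotNullOrUndefined:
      assert(!receiver.is_null_or_undefined);
      std::cout << "receiver known not to be null/undefined; skip the check\n";
      break;
    case ConvertReceiverMode::kAny:
      // Only the generic mode pays for the runtime check.
      std::cout << (receiver.is_null_or_undefined
                        ? "convert receiver before the call\n"
                        : "use receiver as-is\n");
      break;
  }
}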
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index 759176757f..366c7c22cd 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -137,7 +137,6 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
void BranchIfFastPath(Node* native_context, Node* promise_fun, Node* promise,
Label* if_isunmodified, Label* if_ismodified);
- void InitializeFunctionContext(Node* native_context, Node* context, int len);
Node* CreatePromiseContext(Node* native_context, int slots);
void PromiseFulfill(Node* context, Node* promise, Node* result,
v8::Promise::PromiseState status);
@@ -158,7 +157,7 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* CreateThrowerFunction(Node* reason, Node* native_context);
Node* PerformPromiseAll(Node* context, Node* constructor, Node* capability,
- Node* iterator, Label* if_exception,
+ const IteratorRecord& record, Label* if_exception,
Variable* var_exception);
Node* IncrementSmiCell(Node* cell, Label* if_overflow = nullptr);
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 2d81867d51..64e838d53a 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -126,6 +126,106 @@ TF_BUILTIN(ProxyConstructor_ConstructStub, ProxiesCodeStubAssembler) {
ThrowTypeError(context, MessageTemplate::kProxyHandlerOrTargetRevoked);
}
+Node* ProxiesCodeStubAssembler::CreateProxyRevokeFunctionContext(
+ Node* proxy, Node* native_context) {
+ Node* const context = Allocate(FixedArray::SizeFor(kProxyContextLength));
+ StoreMapNoWriteBarrier(context, Heap::kFunctionContextMapRootIndex);
+ InitializeFunctionContext(native_context, context, kProxyContextLength);
+ StoreContextElementNoWriteBarrier(context, kProxySlot, proxy);
+ return context;
+}
+
+Node* ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(Node* proxy,
+ Node* context) {
+ Node* const native_context = LoadNativeContext(context);
+
+ Node* const proxy_context =
+ CreateProxyRevokeFunctionContext(proxy, native_context);
+ Node* const revoke_map = LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ Node* const revoke_info =
+ LoadContextElement(native_context, Context::PROXY_REVOKE_SHARED_FUN);
+
+ return AllocateFunctionWithMapAndContext(revoke_map, revoke_info,
+ proxy_context);
+}
+
+TF_BUILTIN(ProxyRevocable, ProxiesCodeStubAssembler) {
+ Node* const target = Parameter(Descriptor::kTarget);
+ Node* const handler = Parameter(Descriptor::kHandler);
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const native_context = LoadNativeContext(context);
+
+ Label throw_proxy_non_object(this, Label::kDeferred),
+ throw_proxy_handler_or_target_revoked(this, Label::kDeferred),
+ return_create_proxy(this);
+
+ GotoIf(TaggedIsSmi(target), &throw_proxy_non_object);
+ GotoIfNot(IsJSReceiver(target), &throw_proxy_non_object);
+ GotoIfRevokedProxy(target, &throw_proxy_handler_or_target_revoked);
+
+ GotoIf(TaggedIsSmi(handler), &throw_proxy_non_object);
+ GotoIfNot(IsJSReceiver(handler), &throw_proxy_non_object);
+ GotoIfRevokedProxy(handler, &throw_proxy_handler_or_target_revoked);
+
+ Node* const proxy = AllocateProxy(target, handler, context);
+ Node* const revoke = AllocateProxyRevokeFunction(proxy, context);
+
+ Node* const result = Allocate(JSProxyRevocableResult::kSize);
+ Node* const result_map = LoadContextElement(
+ native_context, Context::PROXY_REVOCABLE_RESULT_MAP_INDEX);
+ StoreMapNoWriteBarrier(result, result_map);
+ StoreObjectFieldRoot(result, JSProxyRevocableResult::kPropertiesOrHashOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(result, JSProxyRevocableResult::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldNoWriteBarrier(result, JSProxyRevocableResult::kProxyOffset,
+ proxy);
+ StoreObjectFieldNoWriteBarrier(result, JSProxyRevocableResult::kRevokeOffset,
+ revoke);
+ Return(result);
+
+ BIND(&throw_proxy_non_object);
+ ThrowTypeError(context, MessageTemplate::kProxyNonObject);
+
+ BIND(&throw_proxy_handler_or_target_revoked);
+ ThrowTypeError(context, MessageTemplate::kProxyHandlerOrTargetRevoked);
+}
+
+// Proxy Revocation Functions
+// https://tc39.github.io/ecma262/#sec-proxy-revocation-functions
+TF_BUILTIN(ProxyRevoke, ProxiesCodeStubAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+
+ // 1. Let p be F.[[RevocableProxy]].
+ Node* const proxy_slot = IntPtrConstant(kProxySlot);
+ Node* const proxy = LoadContextElement(context, proxy_slot);
+
+ Label revoke_called(this);
+
+ // 2. If p is null, ...
+ GotoIf(IsNull(proxy), &revoke_called);
+
+ // 3. Set F.[[RevocableProxy]] to null.
+ StoreContextElement(context, proxy_slot, NullConstant());
+
+ // 4. Assert: p is a Proxy object.
+ CSA_ASSERT(this, IsJSProxy(proxy));
+
+ // 5. Set p.[[ProxyTarget]] to null.
+ StoreObjectField(proxy, JSProxy::kTargetOffset, NullConstant());
+
+ // 6. Set p.[[ProxyHandler]] to null.
+ StoreObjectField(proxy, JSProxy::kHandlerOffset, NullConstant());
+
+ // 7. Return undefined.
+ Return(UndefinedConstant());
+
+ BIND(&revoke_called);
+ // 2. ... return undefined.
+ Return(UndefinedConstant());
+}
+
TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
Node* argc = Parameter(Descriptor::kActualArgumentsCount);
Node* argc_ptr = ChangeInt32ToIntPtr(argc);
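
ProxyRevocable above stores the freshly allocated proxy in a dedicated slot of the revoke function's context; ProxyRevoke then nulls that slot and the proxy's target and handler, so further revocations are no-ops. A small standalone model of those spec steps in plain C++ (the types are illustrative, not V8's):

#include <memory>

// Illustrative stand-in for a JSProxy: target and handler are nulled out
// when the proxy is revoked.
struct Proxy {
  void* target = reinterpret_cast<void*>(1);
  void* handler = reinterpret_cast<void*>(1);
};

// Models the revoke function's context slot holding [[RevocableProxy]].
struct RevokeContext {
  std::shared_ptr<Proxy> revocable_proxy;
};

// Proxy Revocation Functions,
// https://tc39.github.io/ecma262/#sec-proxy-revocation-functions
void ProxyRevoke(RevokeContext* context) {
  // 1.-2. If [[RevocableProxy]] is null, revoke was already called.
  if (!context->revocable_proxy) return;
  // 3. Clear the slot so a second call is a no-op.
  std::shared_ptr<Proxy> proxy = std::move(context->revocable_proxy);
  // 5.-6. Null out target and handler; later trap access will throw.
  proxy->target = nullptr;
  proxy->handler = nullptr;
  // 7. Return undefined (here: void).
}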
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.h b/deps/v8/src/builtins/builtins-proxy-gen.h
index 2b2ac54ebe..92b175bfde 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.h
+++ b/deps/v8/src/builtins/builtins-proxy-gen.h
@@ -27,17 +27,26 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler {
Node* receiver);
protected:
+ enum ProxyRevokeFunctionContextSlot {
+ kProxySlot = Context::MIN_CONTEXT_SLOTS,
+ kProxyContextLength,
+ };
+
void GotoIfRevokedProxy(Node* object, Label* if_proxy_revoked);
Node* AllocateProxy(Node* target, Node* handler, Node* context);
Node* AllocateJSArrayForCodeStubArguments(Node* context,
CodeStubArguments& args, Node* argc,
ParameterMode mode);
+ Node* AllocateProxyRevokeFunction(Node* proxy, Node* context);
void CheckHasTrapResult(Node* context, Node* target, Node* proxy, Node* name,
Label* check_passed, Label* if_bailout);
void CheckGetSetTrapResult(Node* context, Node* target, Node* proxy,
Node* name, Node* trap_result, Label* if_not_found,
JSProxy::AccessKind access_kind);
+
+ private:
+ Node* CreateProxyRevokeFunctionContext(Node* proxy, Node* native_context);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 5ce4abd557..4227c628d1 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -62,15 +62,15 @@ Node* RegExpBuiltinsAssembler::AllocateRegExpResult(Node* context, Node* length,
LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX);
StoreMapNoWriteBarrier(result, map);
- Node* const empty_array = EmptyFixedArrayConstant();
- DCHECK(Heap::RootIsImmortalImmovable(Heap::kEmptyFixedArrayRootIndex));
StoreObjectFieldNoWriteBarrier(result, JSArray::kPropertiesOrHashOffset,
- empty_array);
+ EmptyFixedArrayConstant());
StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset, elements);
StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset, length);
StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kIndexOffset, index);
- StoreObjectField(result, JSRegExpResult::kInputOffset, input);
+ StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kInputOffset, input);
+ StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kGroupsOffset,
+ UndefinedConstant());
// Initialize the elements.
@@ -223,8 +223,6 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
// Allocate a new object to store the named capture properties.
// TODO(jgruber): Could be optimized by adding the object map to the heap
// root list.
- // TODO(jgruber): Replace CreateDataProperty runtime calls once we have
- // equivalent functionality in CSA.
Node* const native_context = LoadNativeContext(context);
Node* const map = LoadContextElement(
@@ -233,14 +231,7 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
AllocateNameDictionary(NameDictionary::kInitialCapacity);
Node* const group_object = AllocateJSObjectFromMap(map, properties);
-
- // Store it on the result as a 'group' property.
-
- {
- Node* const name = HeapConstant(isolate()->factory()->groups_string());
- CallRuntime(Runtime::kCreateDataProperty, context, result, name,
- group_object);
- }
+ StoreObjectField(result, JSRegExpResult::kGroupsOffset, group_object);
// One or more named captures exist, add a property for each one.
@@ -267,6 +258,9 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
Node* const capture =
LoadFixedArrayElement(result_elements, SmiUntag(index));
+ // TODO(jgruber): Calling into runtime to create each property is slow.
+ // Either we should create properties entirely in CSA (should be doable),
+ // or only call runtime once and loop there.
CallRuntime(Runtime::kCreateDataProperty, context, group_object, name,
capture);
@@ -834,7 +828,7 @@ Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(Node* const context,
Label out(this);
VARIABLE(var_result, MachineRepresentation::kWord32);
-#if defined(DEBUG) || defined(ENABLE_FASTSLOW_SWITCH)
+#ifdef V8_ENABLE_FORCE_SLOW_PATH
var_result.Bind(Int32Constant(0));
GotoIfForceSlowPath(&out);
#endif
@@ -1225,8 +1219,7 @@ TF_BUILTIN(RegExpPrototypeFlagsGetter, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
Label if_isfastpath(this), if_isslowpath(this, Label::kDeferred);
- Branch(IsFastRegExpNoPrototype(context, receiver, map), &if_isfastpath,
- &if_isslowpath);
+ BranchIfFastRegExp(context, receiver, map, &if_isfastpath, &if_isslowpath);
BIND(&if_isfastpath);
Return(FlagsGetter(context, receiver, true));
@@ -2543,7 +2536,7 @@ TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) {
// to verify the constructor property and jump to the slow path if it has
// been changed.
- // Convert {maybe_limit} to a uint32, capping at the maximal smi value.
+ // Verify {maybe_limit}.
VARIABLE(var_limit, MachineRepresentation::kTagged, maybe_limit);
Label if_limitissmimax(this), runtime(this, Label::kDeferred);
@@ -2552,21 +2545,12 @@ TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) {
Label next(this);
GotoIf(IsUndefined(maybe_limit), &if_limitissmimax);
- GotoIf(TaggedIsPositiveSmi(maybe_limit), &next);
-
- var_limit.Bind(ToUint32(context, maybe_limit));
- {
- // ToUint32(limit) could potentially change the shape of the RegExp
- // object. Recheck that we are still on the fast path and bail to runtime
- // otherwise.
- {
- Label next(this);
- BranchIfFastRegExp(context, regexp, &next, &runtime);
- BIND(&next);
- }
+ Branch(TaggedIsPositiveSmi(maybe_limit), &next, &runtime);
- Branch(TaggedIsPositiveSmi(var_limit.value()), &next, &if_limitissmimax);
- }
+ // We need to be extra-strict and require the given limit to be either
+ // undefined or a positive smi. We can't call ToUint32(maybe_limit) since
+ // that might move us onto the slow path, resulting in ordering spec
+ // violations (see https://crbug.com/801171).
BIND(&if_limitissmimax);
{
@@ -2590,13 +2574,8 @@ TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) {
RegExpPrototypeSplitBody(context, regexp, string, var_limit.value());
BIND(&runtime);
- {
- // The runtime call passes in limit to ensure the second ToUint32(limit)
- // call is not observable.
- CSA_ASSERT(this, IsNumber(var_limit.value()));
- Return(CallRuntime(Runtime::kRegExpSplit, context, regexp, string,
- var_limit.value()));
- }
+ Return(CallRuntime(Runtime::kRegExpSplit, context, regexp, string,
+ var_limit.value()));
}
// ES#sec-regexp.prototype-@@split
@@ -2740,7 +2719,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
TNode<IntPtrT> int_elem = SmiUntag(elem);
TNode<IntPtrT> new_match_start =
Signed(IntPtrAdd(WordShr(int_elem, IntPtrConstant(11)),
- WordAnd(int_elem, IntPtrConstant(0x7ff))));
+ WordAnd(int_elem, IntPtrConstant(0x7FF))));
var_match_start = SmiTag(new_match_start);
Goto(&loop_epilogue);
}
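The RegExpSplit hunk above tightens the fast path: the limit must already be undefined or a positive Smi, because running ToUint32 on an arbitrary object can execute user code (e.g. valueOf) that mutates the RegExp, producing observable ordering violations (crbug.com/801171). A minimal sketch of that guard in plain C++, not part of the patch; kMaxSmi and FastPathLimit are assumed stand-ins.

#include <cstdint>
#include <optional>

constexpr int64_t kMaxSmi = 0x3FFFFFFF;  // assumed 31-bit Smi payload

// Returns the limit usable on the fast path, or std::nullopt to signal "bail
// out to the runtime split", without ever invoking a user-visible conversion
// such as ToUint32.
std::optional<int64_t> FastPathLimit(const std::optional<int64_t>& maybe_limit) {
  if (!maybe_limit.has_value()) return kMaxSmi;  // undefined => "no limit"
  const int64_t v = *maybe_limit;
  if (v >= 0 && v <= kMaxSmi) return v;          // already a positive Smi
  return std::nullopt;                           // anything else: slow path
}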
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index 6122ff85da..278a48c68e 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -213,7 +213,7 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
ValidateAtomicIndex(array, index_word32, context);
Node* index_word = ChangeUint32ToWord(index_word32);
- Node* value_integer = ToInteger(context, value);
+ Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
Node* value_word32 = TruncateTaggedToWord32(context, value_integer);
#if DEBUG
@@ -266,7 +266,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
ValidateAtomicIndex(array, index_word32, context);
- Node* value_integer = ToInteger(context, value);
+ Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
#if DEBUG
DebugSanityCheckAtomicIndex(array, index_word32, context);
@@ -340,8 +340,8 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
ValidateAtomicIndex(array, index_word32, context);
- Node* old_value_integer = ToInteger(context, old_value);
- Node* new_value_integer = ToInteger(context, new_value);
+ Node* old_value_integer = ToInteger_Inline(CAST(context), CAST(old_value));
+ Node* new_value_integer = ToInteger_Inline(CAST(context), CAST(new_value));
#if DEBUG
DebugSanityCheckAtomicIndex(array, index_word32, context);
@@ -436,7 +436,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
ValidateAtomicIndex(array, index_word32, context);
- Node* value_integer = ToInteger(context, value);
+ Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
#if DEBUG
// In Debug mode, we re-validate the index as a sanity check because
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 9d86f3105b..195572de8e 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -126,8 +126,8 @@ Node* StringBuiltinsAssembler::PointerToStringDataAtIndex(
void StringBuiltinsAssembler::ConvertAndBoundsCheckStartArgument(
Node* context, Variable* var_start, Node* start, Node* string_length) {
- TNode<Object> const start_int =
- ToInteger(context, start, CodeStubAssembler::kTruncateMinusZero);
+ TNode<Object> const start_int = ToInteger_Inline(
+ CAST(context), CAST(start), CodeStubAssembler::kTruncateMinusZero);
TNode<Smi> const zero = SmiConstant(0);
Label done(this);
@@ -319,6 +319,31 @@ void StringBuiltinsAssembler::StringEqual_Loop(
}
}
+void StringBuiltinsAssembler::GenerateStringAt(char const* method_name,
+ TNode<Context> context,
+ Node* receiver,
+ TNode<Object> maybe_position,
+ TNode<Object> default_return,
+ StringAtAccessor accessor) {
+ // Check that {receiver} is coercible to Object and convert it to a String.
+ TNode<String> string = ToThisString(context, receiver, method_name);
+
+ // Convert the {position} to a Smi and check that it's in bounds of the
+ // {string}.
+ Label if_outofbounds(this, Label::kDeferred);
+ TNode<Number> position = ToInteger_Inline(
+ context, maybe_position, CodeStubAssembler::kTruncateMinusZero);
+ GotoIfNot(TaggedIsSmi(position), &if_outofbounds);
+ TNode<IntPtrT> index = SmiUntag(CAST(position));
+ TNode<IntPtrT> length = LoadStringLengthAsWord(string);
+ GotoIfNot(UintPtrLessThan(index, length), &if_outofbounds);
+ TNode<Object> result = accessor(string, length, index);
+ Return(result);
+
+ BIND(&if_outofbounds);
+ Return(default_return);
+}
+
void StringBuiltinsAssembler::GenerateStringRelationalComparison(Node* context,
Node* left,
Node* right,
@@ -526,28 +551,43 @@ TF_BUILTIN(StringGreaterThanOrEqual, StringBuiltinsAssembler) {
Operation::kGreaterThanOrEqual);
}
-TF_BUILTIN(StringCharAt, CodeStubAssembler) {
+TF_BUILTIN(StringCharAt, StringBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* position = Parameter(Descriptor::kPosition);
// Load the character code at the {position} from the {receiver}.
- Node* code = StringCharCodeAt(receiver, position);
+ TNode<Int32T> code = StringCharCodeAt(receiver, position);
// And return the single character string with only that {code}
- Node* result = StringFromCharCode(code);
+ TNode<String> result = StringFromCharCode(code);
Return(result);
}
-TF_BUILTIN(StringCharCodeAt, CodeStubAssembler) {
+TF_BUILTIN(StringCharCodeAt, StringBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* position = Parameter(Descriptor::kPosition);
// Load the character code at the {position} from the {receiver}.
- Node* code = StringCharCodeAt(receiver, position);
+ TNode<Int32T> code = StringCharCodeAt(receiver, position);
+
+ // And return it as TaggedSigned value.
+ // TODO(turbofan): Allow builtins to return values untagged.
+ TNode<Smi> result = SmiFromWord32(code);
+ Return(result);
+}
+
+TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) {
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* position = Parameter(Descriptor::kPosition);
+ // TODO(sigurds) Figure out if passing length as argument pays off.
+ TNode<IntPtrT> length = LoadStringLengthAsWord(receiver);
+ // Load the character code at the {position} from the {receiver}.
+ TNode<Int32T> code =
+ LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF32);
// And return it as TaggedSigned value.
// TODO(turbofan): Allow builtins to return values untagged.
- Node* result = SmiFromWord32(code);
+ TNode<Smi> result = SmiFromWord32(code);
Return(result);
}
@@ -563,7 +603,7 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
- TNode<Smi> smi_argc = SmiTag(arguments.GetLength());
+ TNode<Smi> smi_argc = SmiTag(arguments.GetLength(INTPTR_PARAMETERS));
// Check if we have exactly one argument (plus the implicit receiver), i.e.
// if the parent frame is not an arguments adaptor frame.
Label if_oneargument(this), if_notoneargument(this);
@@ -577,7 +617,8 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
// string on the fly otherwise.
Node* code = arguments.AtIndex(0);
Node* code32 = TruncateTaggedToWord32(context, code);
- Node* code16 = Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
+ TNode<Int32T> code16 =
+ Signed(Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit)));
Node* result = StringFromCharCode(code16);
arguments.PopAndReturn(result);
}
@@ -662,115 +703,49 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
}
// ES6 #sec-string.prototype.charat
-TF_BUILTIN(StringPrototypeCharAt, CodeStubAssembler) {
+TF_BUILTIN(StringPrototypeCharAt, StringBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* receiver = Parameter(Descriptor::kReceiver);
- Node* position = Parameter(Descriptor::kPosition);
- Node* context = Parameter(Descriptor::kContext);
-
- // Check that {receiver} is coercible to Object and convert it to a String.
- receiver = ToThisString(context, receiver, "String.prototype.charAt");
-
- // Convert the {position} to a Smi and check that it's in bounds of the
- // {receiver}.
- {
- Label return_emptystring(this, Label::kDeferred);
- position =
- ToInteger(context, position, CodeStubAssembler::kTruncateMinusZero);
- GotoIfNot(TaggedIsSmi(position), &return_emptystring);
-
- // Determine the actual length of the {receiver} String.
- TNode<Smi> receiver_length = LoadStringLengthAsSmi(receiver);
-
- // Return "" if the Smi {position} is outside the bounds of the {receiver}.
- Label if_positioninbounds(this);
- Branch(SmiAboveOrEqual(position, receiver_length), &return_emptystring,
- &if_positioninbounds);
-
- BIND(&return_emptystring);
- Return(EmptyStringConstant());
-
- BIND(&if_positioninbounds);
- }
-
- // Load the character code at the {position} from the {receiver}.
- CSA_ASSERT(this, IntPtrLessThan(SmiUntag(position),
- LoadStringLengthAsWord(receiver)));
- CSA_ASSERT(this,
- IntPtrGreaterThanOrEqual(SmiUntag(position), IntPtrConstant(0)));
- Node* code = StringCharCodeAt(receiver, SmiUntag(position));
+ TNode<Object> maybe_position = CAST(Parameter(Descriptor::kPosition));
- // And return the single character string with only that {code}.
- Node* result = StringFromCharCode(code);
- Return(result);
+ GenerateStringAt("String.prototype.charAt", context, receiver, maybe_position,
+ EmptyStringConstant(),
+ [this](TNode<String> string, TNode<IntPtrT> length,
+ TNode<IntPtrT> index) {
+ TNode<Int32T> code = StringCharCodeAt(string, index);
+ return StringFromCharCode(code);
+ });
}
// ES6 #sec-string.prototype.charcodeat
-TF_BUILTIN(StringPrototypeCharCodeAt, CodeStubAssembler) {
+TF_BUILTIN(StringPrototypeCharCodeAt, StringBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* receiver = Parameter(Descriptor::kReceiver);
- Node* position = Parameter(Descriptor::kPosition);
- Node* context = Parameter(Descriptor::kContext);
-
- // Check that {receiver} is coercible to Object and convert it to a String.
- receiver = ToThisString(context, receiver, "String.prototype.charCodeAt");
-
- // Convert the {position} to a Smi and check that it's in bounds of the
- // {receiver}.
- {
- Label return_nan(this, Label::kDeferred);
- position =
- ToInteger(context, position, CodeStubAssembler::kTruncateMinusZero);
- GotoIfNot(TaggedIsSmi(position), &return_nan);
-
- // Determine the actual length of the {receiver} String.
- TNode<Smi> receiver_length = LoadStringLengthAsSmi(receiver);
-
- // Return NaN if the Smi {position} is outside the bounds of the {receiver}.
- Label if_positioninbounds(this);
- Branch(SmiAboveOrEqual(position, receiver_length), &return_nan,
- &if_positioninbounds);
+ TNode<Object> maybe_position = CAST(Parameter(Descriptor::kPosition));
- BIND(&return_nan);
- Return(NaNConstant());
-
- BIND(&if_positioninbounds);
- }
-
- // Load the character at the {position} from the {receiver}.
- Node* value = StringCharCodeAt(receiver, SmiUntag(position));
- Node* result = SmiFromWord32(value);
- Return(result);
+ GenerateStringAt("String.prototype.charCodeAt", context, receiver,
+ maybe_position, NanConstant(),
+ [this](TNode<String> receiver, TNode<IntPtrT> length,
+ TNode<IntPtrT> index) {
+ Node* value = StringCharCodeAt(receiver, index);
+ return SmiFromWord32(value);
+ });
}
// ES6 #sec-string.prototype.codepointat
TF_BUILTIN(StringPrototypeCodePointAt, StringBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* receiver = Parameter(Descriptor::kReceiver);
- Node* position = Parameter(Descriptor::kPosition);
-
- // Check that {receiver} is coercible to Object and convert it to a String.
- receiver = ToThisString(context, receiver, "String.prototype.codePointAt");
-
- // Convert the {position} to a Smi and check that it's in bounds of the
- // {receiver}.
- Label if_inbounds(this), if_outofbounds(this, Label::kDeferred);
- position =
- ToInteger(context, position, CodeStubAssembler::kTruncateMinusZero);
- GotoIfNot(TaggedIsSmi(position), &if_outofbounds);
- TNode<IntPtrT> untagged_position = SmiUntag(position);
- TNode<IntPtrT> receiver_length = LoadStringLengthAsWord(receiver);
- Branch(UintPtrLessThan(untagged_position, receiver_length), &if_inbounds,
- &if_outofbounds);
-
- BIND(&if_inbounds);
- {
- Node* value = LoadSurrogatePairAt(
- receiver, receiver_length, untagged_position, UnicodeEncoding::UTF32);
- Node* result = SmiFromWord32(value);
- Return(result);
- }
+ TNode<Object> maybe_position = CAST(Parameter(Descriptor::kPosition));
- BIND(&if_outofbounds);
- Return(UndefinedConstant());
+ GenerateStringAt("String.prototype.codePointAt", context, receiver,
+ maybe_position, UndefinedConstant(),
+ [this](TNode<String> receiver, TNode<IntPtrT> length,
+ TNode<IntPtrT> index) {
+ Node* value = LoadSurrogatePairAt(receiver, length, index,
+ UnicodeEncoding::UTF32);
+ return SmiFromWord32(value);
+ });
}
// ES6 String.prototype.concat(...args)
@@ -999,7 +974,7 @@ void StringIncludesIndexOfAssembler::Generate(SearchVariant variant) {
CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
Node* const receiver = arguments.GetReceiver();
// From now on use word-size argc value.
- argc = arguments.GetLength();
+ argc = arguments.GetLength(INTPTR_PARAMETERS);
VARIABLE(var_search_string, MachineRepresentation::kTagged);
VARIABLE(var_position, MachineRepresentation::kTagged);
@@ -1217,16 +1192,17 @@ TF_BUILTIN(StringPrototypeRepeat, StringBuiltinsAssembler) {
Label invalid_count(this), invalid_string_length(this),
return_emptystring(this);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const count = Parameter(Descriptor::kCount);
+ TNode<Object> count = CAST(Parameter(Descriptor::kCount));
Node* const string =
ToThisString(context, receiver, "String.prototype.repeat");
Node* const is_stringempty =
SmiEqual(LoadStringLengthAsSmi(string), SmiConstant(0));
- VARIABLE(var_count, MachineRepresentation::kTagged,
- ToInteger(context, count, CodeStubAssembler::kTruncateMinusZero));
+ VARIABLE(
+ var_count, MachineRepresentation::kTagged,
+ ToInteger_Inline(context, count, CodeStubAssembler::kTruncateMinusZero));
// Verifies a valid count and takes a fast path when the result will be an
// empty string.
@@ -1713,8 +1689,8 @@ TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
CodeStubArguments args(this, argc);
Node* const receiver = args.GetReceiver();
Node* const start = args.GetOptionalArgumentValue(kStart);
- Node* const end = args.GetOptionalArgumentValue(kEnd);
- Node* const context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Object> end = CAST(args.GetOptionalArgumentValue(kEnd));
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Smi> const smi_zero = SmiConstant(0);
@@ -1737,7 +1713,7 @@ TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
// else let intEnd be ? ToInteger(end).
Node* const end_int =
- ToInteger(context, end, CodeStubAssembler::kTruncateMinusZero);
+ ToInteger_Inline(context, end, CodeStubAssembler::kTruncateMinusZero);
// 7. If intEnd < 0, let to be max(len + intEnd, 0);
// otherwise let to be min(intEnd, len).
@@ -1893,8 +1869,8 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
Node* const receiver = args.GetReceiver();
Node* const start = args.GetOptionalArgumentValue(kStartArg);
- Node* const length = args.GetOptionalArgumentValue(kLengthArg);
- Node* const context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Object> length = CAST(args.GetOptionalArgumentValue(kLengthArg));
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Label out(this);
@@ -1925,8 +1901,8 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
Goto(&if_issmi);
BIND(&if_isnotundefined);
- var_length =
- ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero);
+ var_length = ToInteger_Inline(context, length,
+ CodeStubAssembler::kTruncateMinusZero);
}
TVARIABLE(Smi, var_result_length);
@@ -1984,7 +1960,7 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
TVARIABLE(Smi, var_result);
TNode<Object> const value_int =
- this->ToInteger(context, value, CodeStubAssembler::kTruncateMinusZero);
+ ToInteger_Inline(context, value, CodeStubAssembler::kTruncateMinusZero);
Label if_issmi(this), if_isnotsmi(this, Label::kDeferred);
Branch(TaggedIsSmi(value_int), &if_issmi, &if_isnotsmi);
@@ -2296,14 +2272,14 @@ TF_BUILTIN(StringPrototypeIterator, CodeStubAssembler) {
// Return the |word32| codepoint at {index}. Supports SeqStrings and
// ExternalStrings.
-TNode<Uint32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
+TNode<Int32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
SloppyTNode<String> string, SloppyTNode<IntPtrT> length,
SloppyTNode<IntPtrT> index, UnicodeEncoding encoding) {
Label handle_surrogate_pair(this), return_result(this);
- TVARIABLE(Uint32T, var_result);
- TVARIABLE(Uint32T, var_trail);
+ TVARIABLE(Int32T, var_result);
+ TVARIABLE(Int32T, var_trail);
var_result = StringCharCodeAt(string, index);
- var_trail = Unsigned(Int32Constant(0));
+ var_trail = Int32Constant(0);
GotoIf(Word32NotEqual(Word32And(var_result, Int32Constant(0xFC00)),
Int32Constant(0xD800)),
@@ -2318,8 +2294,8 @@ TNode<Uint32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
BIND(&handle_surrogate_pair);
{
- TNode<Uint32T> lead = var_result;
- TNode<Uint32T> trail = var_trail;
+ TNode<Int32T> lead = var_result;
+ TNode<Int32T> trail = var_trail;
// Check that this path is only taken if a surrogate pair is found
CSA_SLOW_ASSERT(this,
@@ -2331,7 +2307,7 @@ TNode<Uint32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
switch (encoding) {
case UnicodeEncoding::UTF16:
- var_result = Unsigned(Word32Or(
+ var_result = Signed(Word32Or(
// Need to swap the order for big-endian platforms
#if V8_TARGET_BIG_ENDIAN
Word32Shl(lead, Int32Constant(16)), trail));
@@ -2347,8 +2323,8 @@ TNode<Uint32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
// (lead << 10) + trail + SURROGATE_OFFSET
- var_result = Unsigned(Int32Add(Word32Shl(lead, Int32Constant(10)),
- Int32Add(trail, surrogate_offset)));
+ var_result = Signed(Int32Add(Word32Shl(lead, Int32Constant(10)),
+ Int32Add(trail, surrogate_offset)));
break;
}
}
@@ -2387,8 +2363,8 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
BIND(&next_codepoint);
{
UnicodeEncoding encoding = UnicodeEncoding::UTF16;
- Node* ch = LoadSurrogatePairAt(string, length, position, encoding);
- Node* value = StringFromCodePoint(ch, encoding);
+ TNode<Int32T> ch = LoadSurrogatePairAt(string, length, position, encoding);
+ TNode<String> value = StringFromCodePoint(ch, encoding);
var_value.Bind(value);
TNode<IntPtrT> length = LoadStringLengthAsWord(value);
StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kNextIndexOffset,
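The LoadSurrogatePairAt hunks above only change node types (Uint32T to Int32T); the code-point math itself is unchanged: a lead/trail surrogate pair combines as (lead << 10) + trail + SURROGATE_OFFSET with SURROGATE_OFFSET = 0x10000 - (0xD800 << 10) - 0xDC00. A standalone sketch of that arithmetic, not part of the patch; CombineSurrogatePair is a stand-in name.

#include <cassert>
#include <cstdint>

uint32_t CombineSurrogatePair(uint16_t lead, uint16_t trail) {
  assert((lead & 0xFC00) == 0xD800);   // lead surrogate: 0xD800..0xDBFF
  assert((trail & 0xFC00) == 0xDC00);  // trail surrogate: 0xDC00..0xDFFF
  constexpr int32_t kSurrogateOffset = 0x10000 - (0xD800 << 10) - 0xDC00;
  return (static_cast<uint32_t>(lead) << 10) + trail + kSurrogateOffset;
}

// Example: CombineSurrogatePair(0xD83D, 0xDE00) yields 0x1F600.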
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index f1111b3465..1bd5429fdb 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -57,10 +57,19 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
SloppyTNode<Object> value,
SloppyTNode<Smi> limit);
- TNode<Uint32T> LoadSurrogatePairAt(SloppyTNode<String> string,
- SloppyTNode<IntPtrT> length,
- SloppyTNode<IntPtrT> index,
- UnicodeEncoding encoding);
+ typedef std::function<TNode<Object>(
+ TNode<String> receiver, TNode<IntPtrT> length, TNode<IntPtrT> index)>
+ StringAtAccessor;
+
+ void GenerateStringAt(const char* method_name, TNode<Context> context,
+ Node* receiver, TNode<Object> maybe_position,
+ TNode<Object> default_return,
+ StringAtAccessor accessor);
+
+ TNode<Int32T> LoadSurrogatePairAt(SloppyTNode<String> string,
+ SloppyTNode<IntPtrT> length,
+ SloppyTNode<IntPtrT> index,
+ UnicodeEncoding encoding);
void StringIndexOf(Node* const subject_string, Node* const search_string,
Node* const position, std::function<void(Node*)> f_return);
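The StringAtAccessor typedef and GenerateStringAt declaration added above let charAt, charCodeAt and codePointAt share one bounds-checked skeleton and differ only in what they read at the index and what they return out of bounds. A rough plain-C++ analogue of that pattern, not part of the patch; StringAt and the accessor signature here are illustrative, not V8 APIs.

#include <cstddef>
#include <functional>
#include <string>

using StringAtAccessor = std::function<std::string(
    const std::string& s, std::size_t length, std::size_t index)>;

std::string StringAt(const std::string& s, long long position,
                     const std::string& default_return,
                     const StringAtAccessor& accessor) {
  const std::size_t length = s.size();
  if (position < 0 || static_cast<std::size_t>(position) >= length)
    return default_return;  // out of bounds: hand back the caller's default
  return accessor(s, length, static_cast<std::size_t>(position));
}

// charAt-style usage: empty string on out-of-bounds reads.
// StringAt(str, 3, "", [](const std::string& s, std::size_t, std::size_t i) {
//   return std::string(1, s[i]);
// });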
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index 14a74afb6d..d2e447538d 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -324,8 +324,8 @@ namespace {
inline bool ToUpperOverflows(uc32 character) {
// y with umlauts and the micro sign are the only characters that stop
// fitting into one-byte when converting to uppercase.
- static const uc32 yuml_code = 0xff;
- static const uc32 micro_code = 0xb5;
+ static const uc32 yuml_code = 0xFF;
+ static const uc32 micro_code = 0xB5;
return (character == yuml_code || character == micro_code);
}
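The hunk above only reformats the hex constants, but the surrounding comment's claim is worth spelling out: per that comment, ÿ (0xFF, uppercasing to U+0178 Ÿ) and µ (0xB5, uppercasing to U+039C Μ) are the only one-byte characters whose uppercase forms leave the Latin-1 range. A tiny illustration, not part of the patch:

#include <cstdint>

// True when uppercasing the Latin-1 character would leave the one-byte range:
// U+00FF 'ÿ' maps to U+0178 'Ÿ' and U+00B5 'µ' maps to U+039C 'Μ'.
bool UppercaseLeavesLatin1(uint16_t c) {
  return c == 0xFF || c == 0xB5;
}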
diff --git a/deps/v8/src/builtins/builtins-typedarray-gen.cc b/deps/v8/src/builtins/builtins-typedarray-gen.cc
index df89d1ced3..b830a8597d 100644
--- a/deps/v8/src/builtins/builtins-typedarray-gen.cc
+++ b/deps/v8/src/builtins/builtins-typedarray-gen.cc
@@ -36,15 +36,17 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
const char* method_name,
IterationKind iteration_kind);
- void SetupTypedArray(Node* holder, Node* length, Node* byte_offset,
- Node* byte_length);
- void AttachBuffer(Node* holder, Node* buffer, Node* map, Node* length,
- Node* byte_offset);
-
- Node* LoadMapForType(Node* array);
- Node* CalculateExternalPointer(Node* backing_store, Node* byte_offset);
+ void SetupTypedArray(TNode<JSTypedArray> holder, TNode<Smi> length,
+ TNode<Number> byte_offset, TNode<Number> byte_length);
+ void AttachBuffer(TNode<JSTypedArray> holder, TNode<JSArrayBuffer> buffer,
+ TNode<Map> map, TNode<Smi> length,
+ TNode<Number> byte_offset);
+
+ TNode<Map> LoadMapForType(TNode<JSTypedArray> array);
+ TNode<UintPtrT> CalculateExternalPointer(TNode<UintPtrT> backing_store,
+ TNode<Number> byte_offset);
Node* LoadDataPtr(Node* typed_array);
- Node* ByteLengthIsValid(Node* byte_length);
+ TNode<BoolT> ByteLengthIsValid(TNode<Number> byte_length);
// Returns true if kind is either UINT8_ELEMENTS or UINT8_CLAMPED_ELEMENTS.
TNode<Word32T> IsUint8ElementsKind(TNode<Word32T> kind);
@@ -78,9 +80,8 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<IntPtrT> offset);
};
-Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
- CSA_ASSERT(this, IsJSTypedArray(array));
-
+TNode<Map> TypedArrayBuiltinsAssembler::LoadMapForType(
+ TNode<JSTypedArray> array) {
Label unreachable(this), done(this);
Label uint8_elements(this), uint8_clamped_elements(this), int8_elements(this),
uint16_elements(this), int16_elements(this), uint32_elements(this),
@@ -99,10 +100,10 @@ Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
- VARIABLE(var_typed_map, MachineRepresentation::kTagged);
+ TVARIABLE(Map, var_typed_map);
- Node* array_map = LoadMap(array);
- Node* elements_kind = LoadMapElementsKind(array_map);
+ TNode<Map> array_map = LoadMap(array);
+ TNode<Int32T> elements_kind = LoadMapElementsKind(array_map);
Switch(elements_kind, &unreachable, elements_kinds, elements_kind_labels,
kTypedElementsKindCount);
@@ -113,7 +114,7 @@ Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
ExternalArrayType type =
isolate()->factory()->GetArrayTypeFromElementsKind(kind);
Handle<Map> map(isolate()->heap()->MapForFixedTypedArray(type));
- var_typed_map.Bind(HeapConstant(map));
+ var_typed_map = HeapConstant(map);
Goto(&done);
}
}
@@ -121,7 +122,7 @@ Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
BIND(&unreachable);
{ Unreachable(); }
BIND(&done);
- return var_typed_map.value();
+ return var_typed_map;
}
// The byte_offset can be higher than Smi range, in which case to perform the
@@ -131,10 +132,10 @@ Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
// can't allocate an array bigger than our 32-bit arithmetic range anyway. 64
// bit platforms could theoretically have an offset up to 2^35 - 1, so we may
// need to convert the float heap number to an intptr.
-Node* TypedArrayBuiltinsAssembler::CalculateExternalPointer(Node* backing_store,
- Node* byte_offset) {
- return IntPtrAdd(backing_store,
- ChangeNonnegativeNumberToUintPtr(byte_offset));
+TNode<UintPtrT> TypedArrayBuiltinsAssembler::CalculateExternalPointer(
+ TNode<UintPtrT> backing_store, TNode<Number> byte_offset) {
+ return Unsigned(
+ IntPtrAdd(backing_store, ChangeNonnegativeNumberToUintPtr(byte_offset)));
}
// Setup the TypedArray which is under construction.
@@ -142,14 +143,10 @@ Node* TypedArrayBuiltinsAssembler::CalculateExternalPointer(Node* backing_store,
// - Set the byte_offset.
// - Set the byte_length.
// - Set EmbedderFields to 0.
-void TypedArrayBuiltinsAssembler::SetupTypedArray(Node* holder, Node* length,
- Node* byte_offset,
- Node* byte_length) {
- CSA_ASSERT(this, IsJSTypedArray(holder));
- CSA_ASSERT(this, TaggedIsSmi(length));
- CSA_ASSERT(this, IsNumber(byte_offset));
- CSA_ASSERT(this, IsNumber(byte_length));
-
+void TypedArrayBuiltinsAssembler::SetupTypedArray(TNode<JSTypedArray> holder,
+ TNode<Smi> length,
+ TNode<Number> byte_offset,
+ TNode<Number> byte_length) {
StoreObjectField(holder, JSTypedArray::kLengthOffset, length);
StoreObjectField(holder, JSArrayBufferView::kByteOffsetOffset, byte_offset);
StoreObjectField(holder, JSArrayBufferView::kByteLengthOffset, byte_length);
@@ -160,15 +157,11 @@ void TypedArrayBuiltinsAssembler::SetupTypedArray(Node* holder, Node* length,
}
// Attach an off-heap buffer to a TypedArray.
-void TypedArrayBuiltinsAssembler::AttachBuffer(Node* holder, Node* buffer,
- Node* map, Node* length,
- Node* byte_offset) {
- CSA_ASSERT(this, IsJSTypedArray(holder));
- CSA_ASSERT(this, IsJSArrayBuffer(buffer));
- CSA_ASSERT(this, IsMap(map));
- CSA_ASSERT(this, TaggedIsSmi(length));
- CSA_ASSERT(this, IsNumber(byte_offset));
-
+void TypedArrayBuiltinsAssembler::AttachBuffer(TNode<JSTypedArray> holder,
+ TNode<JSArrayBuffer> buffer,
+ TNode<Map> map,
+ TNode<Smi> length,
+ TNode<Number> byte_offset) {
StoreObjectField(holder, JSArrayBufferView::kBufferOffset, buffer);
Node* elements = Allocate(FixedTypedArrayBase::kHeaderSize);
@@ -177,10 +170,11 @@ void TypedArrayBuiltinsAssembler::AttachBuffer(Node* holder, Node* buffer,
StoreObjectFieldNoWriteBarrier(
elements, FixedTypedArrayBase::kBasePointerOffset, SmiConstant(0));
- Node* backing_store = LoadObjectField(
- buffer, JSArrayBuffer::kBackingStoreOffset, MachineType::Pointer());
+ TNode<UintPtrT> backing_store =
+ LoadObjectField<UintPtrT>(buffer, JSArrayBuffer::kBackingStoreOffset);
- Node* external_pointer = CalculateExternalPointer(backing_store, byte_offset);
+ TNode<UintPtrT> external_pointer =
+ CalculateExternalPointer(backing_store, byte_offset);
StoreObjectFieldNoWriteBarrier(
elements, FixedTypedArrayBase::kExternalPointerOffset, external_pointer,
MachineType::PointerRepresentation());
@@ -189,23 +183,16 @@ void TypedArrayBuiltinsAssembler::AttachBuffer(Node* holder, Node* buffer,
}
TF_BUILTIN(TypedArrayInitializeWithBuffer, TypedArrayBuiltinsAssembler) {
- Node* holder = Parameter(Descriptor::kHolder);
- Node* length = Parameter(Descriptor::kLength);
- Node* buffer = Parameter(Descriptor::kBuffer);
- Node* element_size = Parameter(Descriptor::kElementSize);
- Node* byte_offset = Parameter(Descriptor::kByteOffset);
-
- CSA_ASSERT(this, IsJSTypedArray(holder));
- CSA_ASSERT(this, TaggedIsSmi(length));
- CSA_ASSERT(this, IsJSArrayBuffer(buffer));
- CSA_ASSERT(this, TaggedIsSmi(element_size));
- CSA_ASSERT(this, IsNumber(byte_offset));
+ TNode<JSTypedArray> holder = CAST(Parameter(Descriptor::kHolder));
+ TNode<Smi> length = CAST(Parameter(Descriptor::kLength));
+ TNode<JSArrayBuffer> buffer = CAST(Parameter(Descriptor::kBuffer));
+ TNode<Smi> element_size = CAST(Parameter(Descriptor::kElementSize));
+ TNode<Number> byte_offset = CAST(Parameter(Descriptor::kByteOffset));
- Node* fixed_typed_map = LoadMapForType(holder);
+ TNode<Map> fixed_typed_map = LoadMapForType(holder);
// SmiMul returns a heap number in case of Smi overflow.
- Node* byte_length = SmiMul(length, element_size);
- CSA_ASSERT(this, IsNumber(byte_length));
+ TNode<Number> byte_length = SmiMul(length, element_size);
SetupTypedArray(holder, length, byte_offset, byte_length);
AttachBuffer(holder, buffer, fixed_typed_map, length, byte_offset);
@@ -213,18 +200,17 @@ TF_BUILTIN(TypedArrayInitializeWithBuffer, TypedArrayBuiltinsAssembler) {
}
TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
- Node* holder = Parameter(Descriptor::kHolder);
- Node* length = Parameter(Descriptor::kLength);
- Node* element_size = Parameter(Descriptor::kElementSize);
+ TNode<JSTypedArray> holder = CAST(Parameter(Descriptor::kHolder));
+ TNode<Smi> length = CAST(Parameter(Descriptor::kLength));
+ TNode<Smi> element_size = CAST(Parameter(Descriptor::kElementSize));
Node* initialize = Parameter(Descriptor::kInitialize);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- CSA_ASSERT(this, IsJSTypedArray(holder));
CSA_ASSERT(this, TaggedIsPositiveSmi(length));
CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
CSA_ASSERT(this, IsBoolean(initialize));
- Node* byte_offset = SmiConstant(0);
+ TNode<Smi> byte_offset = SmiConstant(0);
static const int32_t fta_base_data_offset =
FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
@@ -235,16 +221,16 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
VARIABLE(var_total_size, MachineType::PointerRepresentation());
// SmiMul returns a heap number in case of Smi overflow.
- Node* byte_length = SmiMul(length, element_size);
- CSA_ASSERT(this, IsNumber(byte_length));
+ TNode<Number> byte_length = SmiMul(length, element_size);
SetupTypedArray(holder, length, byte_offset, byte_length);
- Node* fixed_typed_map = LoadMapForType(holder);
+ TNode<Map> fixed_typed_map = LoadMapForType(holder);
GotoIf(TaggedIsNotSmi(byte_length), &allocate_off_heap);
GotoIf(
SmiGreaterThan(byte_length, SmiConstant(V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP)),
&allocate_off_heap);
+ TNode<IntPtrT> word_byte_length = SmiToWord(CAST(byte_length));
Goto(&allocate_on_heap);
BIND(&allocate_on_heap);
@@ -297,7 +283,7 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
DCHECK_EQ(0, FixedTypedArrayBase::kHeaderSize & kObjectAlignmentMask);
Node* aligned_header_size =
IntPtrConstant(FixedTypedArrayBase::kHeaderSize + kObjectAlignmentMask);
- Node* size = IntPtrAdd(SmiToWord(byte_length), aligned_header_size);
+ Node* size = IntPtrAdd(word_byte_length, aligned_header_size);
var_total_size.Bind(WordAnd(size, IntPtrConstant(~kObjectAlignmentMask)));
Goto(&allocate_elements);
}
@@ -305,7 +291,7 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
BIND(&aligned);
{
Node* header_size = IntPtrConstant(FixedTypedArrayBase::kHeaderSize);
- var_total_size.Bind(IntPtrAdd(SmiToWord(byte_length), header_size));
+ var_total_size.Bind(IntPtrAdd(word_byte_length, header_size));
Goto(&allocate_elements);
}
@@ -344,11 +330,11 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
ExternalConstant(ExternalReference::libc_memset_function(isolate()));
CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
MachineType::IntPtr(), MachineType::UintPtr(), memset,
- backing_store, IntPtrConstant(0), SmiToWord(byte_length));
+ backing_store, IntPtrConstant(0), word_byte_length);
Goto(&done);
}
- VARIABLE(var_buffer, MachineRepresentation::kTagged);
+ TVARIABLE(JSArrayBuffer, var_buffer);
BIND(&allocate_off_heap);
{
@@ -356,8 +342,8 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
Node* buffer_constructor = LoadContextElement(
LoadNativeContext(context), Context::ARRAY_BUFFER_FUN_INDEX);
- var_buffer.Bind(ConstructJS(CodeFactory::Construct(isolate()), context,
- buffer_constructor, byte_length));
+ var_buffer = CAST(ConstructJS(CodeFactory::Construct(isolate()), context,
+ buffer_constructor, byte_length));
Goto(&attach_buffer);
}
@@ -365,16 +351,15 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
{
Node* buffer_constructor_noinit = LoadContextElement(
LoadNativeContext(context), Context::ARRAY_BUFFER_NOINIT_FUN_INDEX);
- var_buffer.Bind(CallJS(CodeFactory::Call(isolate()), context,
- buffer_constructor_noinit, UndefinedConstant(),
- byte_length));
+ var_buffer = CAST(CallJS(CodeFactory::Call(isolate()), context,
+ buffer_constructor_noinit, UndefinedConstant(),
+ byte_length));
Goto(&attach_buffer);
}
BIND(&attach_buffer);
{
- AttachBuffer(holder, var_buffer.value(), fixed_typed_map, length,
- byte_offset);
+ AttachBuffer(holder, var_buffer, fixed_typed_map, length, byte_offset);
Goto(&done);
}
@@ -385,18 +370,18 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
// ES6 #sec-typedarray-length
TF_BUILTIN(TypedArrayConstructByLength, TypedArrayBuiltinsAssembler) {
Node* holder = Parameter(Descriptor::kHolder);
- Node* length = Parameter(Descriptor::kLength);
- Node* element_size = Parameter(Descriptor::kElementSize);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Object> maybe_length = CAST(Parameter(Descriptor::kLength));
+ TNode<Object> element_size = CAST(Parameter(Descriptor::kElementSize));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CSA_ASSERT(this, IsJSTypedArray(holder));
CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
- Node* initialize = TrueConstant();
-
Label invalid_length(this);
- length = ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero);
+ TNode<Number> length = ToInteger_Inline(
+ context, maybe_length, CodeStubAssembler::kTruncateMinusZero);
+
// The maximum length of a TypedArray is MaxSmi().
// Note: this is not per spec, but rather a constraint of our current
// representation (which uses smi's).
@@ -404,7 +389,7 @@ TF_BUILTIN(TypedArrayConstructByLength, TypedArrayBuiltinsAssembler) {
GotoIf(SmiLessThan(length, SmiConstant(0)), &invalid_length);
CallBuiltin(Builtins::kTypedArrayInitialize, context, holder, length,
- element_size, initialize);
+ element_size, TrueConstant());
Return(UndefinedConstant());
BIND(&invalid_length);
@@ -419,10 +404,10 @@ TF_BUILTIN(TypedArrayConstructByLength, TypedArrayBuiltinsAssembler) {
TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
Node* holder = Parameter(Descriptor::kHolder);
Node* buffer = Parameter(Descriptor::kBuffer);
- Node* byte_offset = Parameter(Descriptor::kByteOffset);
+ TNode<Object> byte_offset = CAST(Parameter(Descriptor::kByteOffset));
Node* length = Parameter(Descriptor::kLength);
Node* element_size = Parameter(Descriptor::kElementSize);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CSA_ASSERT(this, IsJSTypedArray(holder));
CSA_ASSERT(this, IsJSArrayBuffer(buffer));
@@ -440,8 +425,8 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
GotoIf(IsUndefined(byte_offset), &check_length);
- offset.Bind(
- ToInteger(context, byte_offset, CodeStubAssembler::kTruncateMinusZero));
+ offset.Bind(ToInteger_Inline(context, byte_offset,
+ CodeStubAssembler::kTruncateMinusZero));
Branch(TaggedIsSmi(offset.value()), &offset_is_smi, &offset_not_smi);
// Check that the offset is a multiple of the element size.
@@ -569,25 +554,27 @@ Node* TypedArrayBuiltinsAssembler::LoadDataPtr(Node* typed_array) {
return IntPtrAdd(base_pointer, external_pointer);
}
-Node* TypedArrayBuiltinsAssembler::ByteLengthIsValid(Node* byte_length) {
+TNode<BoolT> TypedArrayBuiltinsAssembler::ByteLengthIsValid(
+ TNode<Number> byte_length) {
Label smi(this), done(this);
- VARIABLE(is_valid, MachineRepresentation::kWord32);
+ TVARIABLE(BoolT, is_valid);
GotoIf(TaggedIsSmi(byte_length), &smi);
- CSA_ASSERT(this, IsHeapNumber(byte_length));
- Node* float_value = LoadHeapNumberValue(byte_length);
- Node* max_byte_length_double =
+ TNode<Float64T> float_value = LoadHeapNumberValue(CAST(byte_length));
+ TNode<Float64T> max_byte_length_double =
Float64Constant(FixedTypedArrayBase::kMaxByteLength);
- is_valid.Bind(Float64LessThanOrEqual(float_value, max_byte_length_double));
+ is_valid = Float64LessThanOrEqual(float_value, max_byte_length_double);
Goto(&done);
BIND(&smi);
- Node* max_byte_length = IntPtrConstant(FixedTypedArrayBase::kMaxByteLength);
- is_valid.Bind(UintPtrLessThanOrEqual(SmiUntag(byte_length), max_byte_length));
+ TNode<IntPtrT> max_byte_length =
+ IntPtrConstant(FixedTypedArrayBase::kMaxByteLength);
+ is_valid =
+ UintPtrLessThanOrEqual(SmiUntag(CAST(byte_length)), max_byte_length);
Goto(&done);
BIND(&done);
- return is_valid.value();
+ return is_valid;
}
TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
@@ -611,8 +598,8 @@ TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
Return(UndefinedConstant());
BIND(&fill);
- Node* holder_kind = LoadMapElementsKind(LoadMap(holder));
- Node* source_kind = LoadMapElementsKind(LoadMap(array_like));
+ TNode<Int32T> holder_kind = LoadMapElementsKind(LoadMap(holder));
+ TNode<Int32T> source_kind = LoadMapElementsKind(LoadMap(array_like));
GotoIf(Word32Equal(holder_kind, source_kind), &fast_copy);
// Copy using the elements accessor.
@@ -632,9 +619,10 @@ TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
array_like, JSTypedArray::kBufferOffset)),
Int32Constant(0)));
- Node* byte_length = SmiMul(length, element_size);
+ TNode<Number> byte_length = SmiMul(length, element_size);
CSA_ASSERT(this, ByteLengthIsValid(byte_length));
- Node* byte_length_intptr = ChangeNonnegativeNumberToUintPtr(byte_length);
+ TNode<UintPtrT> byte_length_intptr =
+ ChangeNonnegativeNumberToUintPtr(byte_length);
CSA_ASSERT(this, UintPtrLessThanOrEqual(
byte_length_intptr,
IntPtrConstant(FixedTypedArrayBase::kMaxByteLength)));
@@ -831,24 +819,9 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
BIND(&fast_c_call);
{
- // Overlapping backing stores of different element kinds are handled in
- // runtime. We're a bit conservative here and bail to runtime if ranges
- // overlap and element kinds differ.
-
- TNode<IntPtrT> target_byte_length =
- IntPtrMul(target_length, target_el_size);
CSA_ASSERT(
- this, UintPtrGreaterThanOrEqual(target_byte_length, IntPtrConstant(0)));
-
- TNode<IntPtrT> target_data_end_ptr =
- IntPtrAdd(target_data_ptr, target_byte_length);
- TNode<IntPtrT> source_data_end_ptr =
- IntPtrAdd(source_data_ptr, source_byte_length);
-
- GotoIfNot(
- Word32Or(UintPtrLessThanOrEqual(target_data_end_ptr, source_data_ptr),
- UintPtrLessThanOrEqual(source_data_end_ptr, target_data_ptr)),
- call_runtime);
+ this, UintPtrGreaterThanOrEqual(
+ IntPtrMul(target_length, target_el_size), IntPtrConstant(0)));
TNode<IntPtrT> source_length =
LoadAndUntagObjectField(source, JSTypedArray::kLengthOffset);
@@ -959,8 +932,8 @@ TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
// Normalize offset argument (using ToInteger) and handle heap number cases.
TNode<Object> offset = args.GetOptionalArgumentValue(1, SmiConstant(0));
- TNode<Number> offset_num = ToInteger(context, offset, kTruncateMinusZero);
- CSA_ASSERT(this, IsNumberNormalized(offset_num));
+ TNode<Number> offset_num =
+ ToInteger_Inline(context, offset, kTruncateMinusZero);
// Since ToInteger always returns a Smi if the given value is within Smi
// range, and the only corner case of -0.0 has already been truncated to 0.0,
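Several hunks above concern where a typed array's element data actually lives: for an off-heap view, the external pointer is simply the buffer's backing store plus the byte offset, and the byte length must first pass the kMaxByteLength check in ByteLengthIsValid. A simplified sketch of those two steps, not part of the patch; ArrayBufferView and the kMaxByteLength value are assumptions for illustration.

#include <cstdint>

constexpr uint64_t kMaxByteLength = (uint64_t{1} << 31) - 1;  // assumed cap

struct ArrayBufferView {
  uintptr_t backing_store;  // start of the ArrayBuffer's off-heap storage
  uint64_t byte_offset;     // view offset into the buffer
  uint64_t byte_length;     // element count * element size
};

// Address of the first element, or 0 if the byte length is out of range
// (the ByteLengthIsValid step in the hunks above).
uintptr_t ExternalPointer(const ArrayBufferView& view) {
  if (view.byte_length > kMaxByteLength) return 0;
  return view.backing_store + static_cast<uintptr_t>(view.byte_offset);
}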
diff --git a/deps/v8/src/builtins/builtins-utils-gen.h b/deps/v8/src/builtins/builtins-utils-gen.h
index f328268288..6af5eff357 100644
--- a/deps/v8/src/builtins/builtins-utils-gen.h
+++ b/deps/v8/src/builtins/builtins-utils-gen.h
@@ -47,6 +47,9 @@ class CodeAssemblerState;
void Builtins::Generate_##Name(compiler::CodeAssemblerState* state) { \
Name##Assembler assembler(state); \
state->SetInitialDebugInformation(#Name, __FILE__, __LINE__); \
+ if (Builtins::KindOf(Builtins::k##Name) == Builtins::TFJ) { \
+ assembler.PerformStackCheck(assembler.GetJSContextParameter()); \
+ } \
assembler.Generate##Name##Impl(); \
} \
void Name##Assembler::Generate##Name##Impl()
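The macro change above makes every TFJ (JavaScript-linkage) builtin begin with a stack check before its generated body, while stub-linkage builtins are generated unchanged. In plain C++ the shape is just a conditional prologue; this sketch is not part of the patch, and Kind, PerformStackCheck and GenerateBody are illustrative stand-ins.

enum class Kind { kJS, kStub };

struct Assembler {
  void PerformStackCheck() { /* bail to the runtime if the stack limit is hit */ }
  void GenerateBody() { /* emit the builtin's actual code */ }
};

void GenerateBuiltin(Assembler& assembler, Kind kind) {
  // Only JavaScript-linkage (TFJ) builtins receive the stack-check prologue.
  if (kind == Kind::kJS) assembler.PerformStackCheck();
  assembler.GenerateBody();
}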
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index 27199c8462..3493e776b6 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -85,7 +85,8 @@ class BuiltinArguments : public Arguments {
V8_NOINLINE static Object* Builtin_Impl_Stats_##name( \
int args_length, Object** args_object, Isolate* isolate) { \
BuiltinArguments args(args_length, args_object); \
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Builtin_##name); \
+ RuntimeCallTimerScope timer(isolate, \
+ RuntimeCallCounterId::kBuiltin_##name); \
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \
"V8.Builtin_" #name); \
return Builtin_Impl_##name(args, isolate); \
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index 55fc1c8cd8..dc175e50b7 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -172,11 +172,25 @@ Callable Builtins::CallableFor(Isolate* isolate, Name name) {
#undef CASE_OTHER
case kArrayFilterLoopEagerDeoptContinuation:
case kArrayFilterLoopLazyDeoptContinuation:
+ case kArrayEveryLoopEagerDeoptContinuation:
+ case kArrayEveryLoopLazyDeoptContinuation:
+ case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
+ case kArrayFindIndexLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopLazyDeoptContinuation:
+ case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
+ case kArrayFindLoopEagerDeoptContinuation:
+ case kArrayFindLoopLazyDeoptContinuation:
case kArrayForEach:
case kArrayForEachLoopEagerDeoptContinuation:
case kArrayForEachLoopLazyDeoptContinuation:
case kArrayMapLoopEagerDeoptContinuation:
case kArrayMapLoopLazyDeoptContinuation:
+ case kArrayReduceLoopEagerDeoptContinuation:
+ case kArrayReduceLoopLazyDeoptContinuation:
+ case kArrayReduceRightLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopLazyDeoptContinuation:
+ case kArraySomeLoopEagerDeoptContinuation:
+ case kArraySomeLoopLazyDeoptContinuation:
case kConsoleAssert:
return Callable(code, BuiltinDescriptor(isolate));
default:
@@ -213,12 +227,30 @@ bool Builtins::IsLazy(int index) {
// TODO(wasm): Remove wasm builtins once immovability is no longer required.
switch (index) {
case kAbort: // Required by wasm.
+ case kArrayFindLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayFindLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ // https://crbug.com/v8/6786.
+ case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
+ // https://crbug.com/v8/6786.
+ case kArrayFindIndexLoopEagerDeoptContinuation:
+ // https://crbug.com/v8/6786.
+ case kArrayFindIndexLoopLazyDeoptContinuation:
+ // https://crbug.com/v8/6786.
+ case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
case kArrayForEachLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayForEachLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayMapLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayMapLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayEveryLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayEveryLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayFilterLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayFilterLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayReduceLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayReduceLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayReduceRightLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopLazyDeoptContinuation:
+ case kArraySomeLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArraySomeLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
case kCheckOptimizationMarker:
case kCompileLazy:
case kDeserializeLazy:
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 7635bada49..368e6670c1 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -211,13 +211,15 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ Push(esi);
__ Push(ecx);
__ Push(edi);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
__ Push(edx);
// ----------- S t a t e -------------
// -- sp[0*kPointerSize]: new target
- // -- edi and sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: argument count
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- edi and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: argument count
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
@@ -237,10 +239,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- eax: implicit receiver
- // -- Slot 3 / sp[0*kPointerSize]: new target
- // -- Slot 2 / sp[1*kPointerSize]: constructor function
- // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[3*kPointerSize]: context
+ // -- Slot 4 / sp[0*kPointerSize]: new target
+ // -- Slot 3 / sp[1*kPointerSize]: padding
+ // -- Slot 2 / sp[2*kPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -260,9 +263,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- edx: new target
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
// Restore constructor function and argument count.
@@ -283,9 +287,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- ecx: counter (tagged)
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- edi and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- edi and sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
__ jmp(&entry, Label::kNear);
__ bind(&loop);
@@ -301,9 +306,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- eax: constructor result
// -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -572,7 +578,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
__ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
- __ Assert(equal, kMissingBytecodeArray);
+ __ Assert(equal, AbortReason::kMissingBytecodeArray);
}
// Resume (Ignition/TurboFan) generator object.
@@ -694,6 +700,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ j(equal, &fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
@@ -708,7 +717,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ cmp(
optimized_code_entry,
Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
- __ Assert(equal, kExpectedOptimizationSentinel);
+ __ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
}
__ jmp(&fallthrough);
}
@@ -791,7 +800,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ add(bytecode_size_table,
Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size, Label::kNear);
// Load the size of the current bytecode.
__ bind(&load_size);
@@ -852,7 +860,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
eax);
- __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ equal,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Reset code age.
@@ -1239,7 +1249,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
ebx);
- __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ equal,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Get the target bytecode offset from the frame.
@@ -1300,7 +1312,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(not_equal, BailoutReason::kExpectedFeedbackVector);
+ __ Assert(not_equal, AbortReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
@@ -1818,9 +1830,11 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(not_zero,
+ AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
__ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(equal,
+ AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
@@ -1847,9 +1861,9 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(not_zero, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
// Run the native code for the Array function called as a normal function.
@@ -1875,6 +1889,8 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagSize == 1);
__ lea(edi, Operand(eax, eax, times_1, kSmiTag));
__ push(edi);
+
+ __ Push(Immediate(0)); // Padding.
}
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1980,7 +1996,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
+ Immediate(Map::IsConstructorBit::kMask));
__ j(not_zero, &new_target_constructor, Label::kNear);
__ bind(&new_target_not_constructor);
{
@@ -2294,7 +2310,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target is a proxy and call CallProxy external builtin
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsCallable));
+ Immediate(Map::IsCallableBit::kMask));
__ j(zero, &non_callable);
// Call CallProxy external builtin
@@ -2389,7 +2405,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Check if target has a [[Construct]] internal method.
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
+ Immediate(Map::IsConstructorBit::kMask));
__ j(zero, &non_constructor, Label::kNear);
// Only dispatch to bound functions after checking whether they are
@@ -2464,19 +2480,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edx : message as String object
- // -- esp[0] : return address
- // -----------------------------------
- __ PopReturnAddressTo(ecx);
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
- __ Move(esi, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbortJS);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : actual number of arguments
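The Generate_JSConstructStubGeneric hunks earlier in this file's diff push an extra padding slot (the hole) into the construct frame and shift every state comment by one slot. A compact view of the resulting layout, taken directly from those comments; the enum itself is illustrative, not a V8 declaration.

// Construct stub frame slots after the change, counted from the stack pointer.
enum ConstructFrameSlot {
  kNewTarget = 0,    // sp[0 * kPointerSize]
  kPadding,          // sp[1 * kPointerSize], the-hole pushed as filler
  kConstructor,      // sp[2 * kPointerSize]
  kArgumentCount,    // sp[3 * kPointerSize], tagged
  kContext,          // sp[4 * kPointerSize]
};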
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 167bc1b829..7af02bb32e 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -110,11 +110,11 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin InternalArray functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(a2, t0);
- __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, t0,
- Operand(zero_reg));
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
+ t0, Operand(zero_reg));
__ GetObjectType(a2, a3, t0);
- __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction, t0,
- Operand(MAP_TYPE));
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
+ t0, Operand(MAP_TYPE));
}
// Run the native code for the InternalArray function called as a normal
@@ -139,10 +139,10 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin Array functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(a2, t0);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction1, t0,
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction1, t0,
Operand(zero_reg));
__ GetObjectType(a2, a3, t0);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction2, t0,
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction2, t0,
Operand(MAP_TYPE));
}
@@ -271,13 +271,16 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ SmiTag(a0);
- __ Push(cp, a0, a1, a3);
+ __ Push(cp, a0, a1);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ Push(a3);
// ----------- S t a t e -------------
// -- sp[0*kPointerSize]: new target
- // -- a1 and sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments (tagged)
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- a1 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
__ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
@@ -298,10 +301,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- v0: receiver
- // -- Slot 3 / sp[0*kPointerSize]: new target
- // -- Slot 2 / sp[1*kPointerSize]: constructor function
- // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[3*kPointerSize]: context
+ // -- Slot 4 / sp[0*kPointerSize]: new target
+ // -- Slot 3 / sp[1*kPointerSize]: padding
+ // -- Slot 2 / sp[2*kPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -319,9 +323,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- r3: new target
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
// Restore constructor function and argument count.
@@ -342,9 +347,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- t3: counter
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- a1 and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- a1 and sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
__ jmp(&entry);
__ bind(&loop);
@@ -362,9 +368,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- v0: constructor result
// -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -635,7 +642,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
__ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
__ GetObjectType(a3, a3, a3);
- __ Assert(eq, kMissingBytecodeArray, a3, Operand(BYTECODE_ARRAY_TYPE));
+ __ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
+ Operand(BYTECODE_ARRAY_TYPE));
}
// Resume (Ignition/TurboFan) generator object.
@@ -752,6 +760,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
@@ -764,7 +775,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ Assert(
- eq, kExpectedOptimizationSentinel, optimized_code_entry,
+ eq, AbortReason::kExpectedOptimizationSentinel,
+ optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
}
__ jmp(&fallthrough);
@@ -843,7 +855,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ lbu(bytecode, MemOperand(scratch2));
__ Addu(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size);
// Load the size of the current bytecode.
__ bind(&load_size);
@@ -907,11 +918,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ SmiTst(kInterpreterBytecodeArrayRegister, t0);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, t0,
- Operand(zero_reg));
+ __ Assert(ne,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ t0, Operand(zero_reg));
__ GetObjectType(kInterpreterBytecodeArrayRegister, t0, t0);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, t0,
- Operand(BYTECODE_ARRAY_TYPE));
+ __ Assert(eq,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ t0, Operand(BYTECODE_ARRAY_TYPE));
}
// Reset code age.
@@ -1189,11 +1202,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ SmiTst(kInterpreterBytecodeArrayRegister, at);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, at,
- Operand(zero_reg));
+ __ Assert(ne,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ at, Operand(zero_reg));
__ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a1,
- Operand(BYTECODE_ARRAY_TYPE));
+ __ Assert(eq,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ a1, Operand(BYTECODE_ARRAY_TYPE));
}
// Get the target bytecode offset from the frame.
@@ -1257,7 +1272,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, BailoutReason::kExpectedFeedbackVector, feedback_vector,
+ __ Assert(ne, AbortReason::kExpectedFeedbackVector, feedback_vector,
Operand(at));
}
@@ -1804,8 +1819,9 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ sll(a0, a0, kSmiTagSize);
__ li(t0, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
- __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
+ __ Push(Smi::kZero); // Padding.
+ __ Addu(fp, sp,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
}
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1814,8 +1830,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
- __ lw(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize)));
+ __ lw(a1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ mov(sp, fp);
__ MultiPop(fp.bit() | ra.bit());
__ Lsa(sp, sp, a1, kPointerSizeLog2 - kSmiTagSize);
@@ -1891,7 +1906,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ JumpIfSmi(a3, &new_target_not_constructor);
__ lw(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
__ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t1, t1, Operand(1 << Map::kIsConstructor));
+ __ And(t1, t1, Operand(Map::IsConstructorBit::kMask));
__ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
__ bind(&new_target_not_constructor);
{
@@ -2165,7 +2180,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target has a [[Call]] internal method.
__ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t1, t1, Operand(1 << Map::kIsCallable));
+ __ And(t1, t1, Operand(Map::IsCallableBit::kMask));
__ Branch(&non_callable, eq, t1, Operand(zero_reg));
// Check if target is a proxy and call CallProxy external builtin
@@ -2321,7 +2336,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Check if target has a [[Construct]] internal method.
__ lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t3, t3, Operand(1 << Map::kIsConstructor));
+ __ And(t3, t3, Operand(Map::IsConstructorBit::kMask));
__ Branch(&non_constructor, eq, t3, Operand(zero_reg));
// Only dispatch to bound functions after checking whether they are
@@ -2389,17 +2404,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : message as String object
- // -- ra : return address
- // -----------------------------------
- __ Push(a0);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbortJS);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
// ----------- S t a t e -------------
@@ -2489,8 +2493,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ sll(t2, a2, kPointerSizeLog2);
__ Subu(t1, fp, Operand(t2));
// Adjust for frame.
- __ Subu(t1, t1, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
- 2 * kPointerSize));
+ __ Subu(t1, t1,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize));
Label fill;
__ bind(&fill);
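A note on the padding pushes added above (the extra __ Push(Smi::kZero) / Push(Immediate(0)) and the switch to ArgumentsAdaptorFrameConstants): one plausible reading is that the fixed frames gain a slot so their size stays a multiple of two pointer-sized words, which keeps the stack pointer aligned on ports with a 16-byte ABI requirement. The arithmetic below is an assumed illustration, not V8 code.

// Hedged illustration: an odd number of fixed slots gets one padding slot so
// the frame size stays a multiple of the assumed 16-byte stack alignment.
#include <cstdio>

int main() {
  const int kPointerSize = 8;      // assume a 64-bit port
  const int kStackAlignment = 16;  // assumed ABI requirement
  int fixed_slots = 5;             // e.g. ra, fp, marker, function, argc (made up)
  int padded_slots = fixed_slots + (fixed_slots & 1);  // round up to an even count
  printf("unpadded: %d bytes, padded: %d bytes (16-byte aligned: %s)\n",
         fixed_slots * kPointerSize, padded_slots * kPointerSize,
         (padded_slots * kPointerSize) % kStackAlignment == 0 ? "yes" : "no");
  return 0;
}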
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 811ae637ad..266393070c 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -110,11 +110,11 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin InternalArray functions should be maps.
__ Ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(a2, a4);
- __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, a4,
- Operand(zero_reg));
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
+ a4, Operand(zero_reg));
__ GetObjectType(a2, a3, a4);
- __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction, a4,
- Operand(MAP_TYPE));
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
+ a4, Operand(MAP_TYPE));
}
// Run the native code for the InternalArray function called as a normal
@@ -139,10 +139,10 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin Array functions should be maps.
__ Ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(a2, a4);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction1, a4,
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction1, a4,
Operand(zero_reg));
__ GetObjectType(a2, a3, a4);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction2, a4,
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction2, a4,
Operand(MAP_TYPE));
}
@@ -273,13 +273,16 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ SmiTag(a0);
- __ Push(cp, a0, a1, a3);
+ __ Push(cp, a0, a1);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ Push(a3);
// ----------- S t a t e -------------
// -- sp[0*kPointerSize]: new target
- // -- a1 and sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments (tagged)
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- a1 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
__ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
@@ -300,10 +303,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- v0: receiver
- // -- Slot 3 / sp[0*kPointerSize]: new target
- // -- Slot 2 / sp[1*kPointerSize]: constructor function
- // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[3*kPointerSize]: context
+ // -- Slot 4 / sp[0*kPointerSize]: new target
+ // -- Slot 3 / sp[1*kPointerSize]: padding
+ // -- Slot 2 / sp[2*kPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -321,9 +325,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- r3: new target
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
// Restore constructor function and argument count.
@@ -344,9 +349,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- t3: counter
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- a1 and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- a1 and sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
__ jmp(&entry);
__ bind(&loop);
@@ -364,9 +370,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- v0: constructor result
// -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -526,7 +533,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
__ GetObjectType(a3, a3, a3);
- __ Assert(eq, kMissingBytecodeArray, a3, Operand(BYTECODE_ARRAY_TYPE));
+ __ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
+ Operand(BYTECODE_ARRAY_TYPE));
}
// Resume (Ignition/TurboFan) generator object.
@@ -752,6 +760,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
@@ -764,7 +775,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) {
__ Assert(
- eq, kExpectedOptimizationSentinel, optimized_code_entry,
+ eq, AbortReason::kExpectedOptimizationSentinel,
+ optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
}
__ jmp(&fallthrough);
@@ -843,7 +855,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Lbu(bytecode, MemOperand(scratch2));
__ Daddu(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size);
// Load the size of the current bytecode.
__ bind(&load_size);
@@ -907,11 +918,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ SmiTst(kInterpreterBytecodeArrayRegister, a4);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a4,
- Operand(zero_reg));
+ __ Assert(ne,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ a4, Operand(zero_reg));
__ GetObjectType(kInterpreterBytecodeArrayRegister, a4, a4);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a4,
- Operand(BYTECODE_ARRAY_TYPE));
+ __ Assert(eq,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ a4, Operand(BYTECODE_ARRAY_TYPE));
}
// Reset code age.
@@ -1189,11 +1202,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ SmiTst(kInterpreterBytecodeArrayRegister, at);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, at,
- Operand(zero_reg));
+ __ Assert(ne,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ at, Operand(zero_reg));
__ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a1,
- Operand(BYTECODE_ARRAY_TYPE));
+ __ Assert(eq,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ a1, Operand(BYTECODE_ARRAY_TYPE));
}
// Get the target bytecode offset from the frame.
@@ -1257,7 +1272,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, BailoutReason::kExpectedFeedbackVector, feedback_vector,
+ __ Assert(ne, AbortReason::kExpectedFeedbackVector, feedback_vector,
Operand(at));
}
@@ -1820,8 +1835,9 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ dsll32(a0, a0, 0);
__ li(a4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ MultiPush(a0.bit() | a1.bit() | a4.bit() | fp.bit() | ra.bit());
- __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
+ __ Push(Smi::kZero); // Padding.
+ __ Daddu(fp, sp,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
}
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1830,8 +1846,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
- __ Ld(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize)));
+ __ Ld(a1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ mov(sp, fp);
__ MultiPop(fp.bit() | ra.bit());
__ SmiScale(a4, a1, kPointerSizeLog2);
@@ -1915,7 +1930,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ JumpIfSmi(a3, &new_target_not_constructor);
__ ld(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
__ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t1, t1, Operand(1 << Map::kIsConstructor));
+ __ And(t1, t1, Operand(Map::IsConstructorBit::kMask));
__ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
__ bind(&new_target_not_constructor);
{
@@ -2187,7 +2202,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target has a [[Call]] internal method.
__ Lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t1, t1, Operand(1 << Map::kIsCallable));
+ __ And(t1, t1, Operand(Map::IsCallableBit::kMask));
__ Branch(&non_callable, eq, t1, Operand(zero_reg));
__ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
@@ -2340,7 +2355,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Check if target has a [[Construct]] internal method.
__ Lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t3, t3, Operand(1 << Map::kIsConstructor));
+ __ And(t3, t3, Operand(Map::IsConstructorBit::kMask));
__ Branch(&non_constructor, eq, t3, Operand(zero_reg));
// Only dispatch to bound functions after checking whether they are
@@ -2408,17 +2423,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : message as String object
- // -- ra : return address
- // -----------------------------------
- __ Push(a0);
- __ Move(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbortJS);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
// ----------- S t a t e -------------
@@ -2510,8 +2514,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ dsll(a6, a2, kPointerSizeLog2);
__ Dsubu(a4, fp, Operand(a6));
// Adjust for frame.
- __ Dsubu(a4, a4, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
- 2 * kPointerSize));
+ __ Dsubu(a4, a4,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize));
Label fill;
__ bind(&fill);
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index e0db87cc0c..34da70ff0f 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -109,9 +109,10 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin InternalArray functions should be maps.
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
__ TestIfSmi(r5, r0);
- __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, cr0);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
+ cr0);
__ CompareObjectType(r5, r6, r7, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
@@ -136,9 +137,9 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin Array functions should be maps.
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
__ TestIfSmi(r5, r0);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r5, r6, r7, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
__ mr(r6, r4);
@@ -278,13 +279,16 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ SmiTag(r3);
- __ Push(cp, r3, r4, r6);
+ __ Push(cp, r3, r4);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ Push(r6);
// ----------- S t a t e -------------
// -- sp[0*kPointerSize]: new target
- // -- r4 and sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments (tagged)
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- r4 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
@@ -305,10 +309,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- r3: receiver
- // -- Slot 3 / sp[0*kPointerSize]: new target
- // -- Slot 2 / sp[1*kPointerSize]: constructor function
- // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[3*kPointerSize]: context
+ // -- Slot 4 / sp[0*kPointerSize]: new target
+ // -- Slot 3 / sp[1*kPointerSize]: padding
+ // -- Slot 2 / sp[2*kPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -326,9 +331,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- r6: new target
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
// Restore constructor function and argument count.
@@ -348,9 +354,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- cr0: condition indicating whether r3 is zero
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- r4 and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- r4 and sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
__ beq(&no_args, cr0);
__ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
@@ -373,9 +380,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- r0: constructor result
// -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -545,7 +553,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kMissingBytecodeArray);
+ __ Assert(eq, AbortReason::kMissingBytecodeArray);
}
// Resume (Ignition/TurboFan) generator object.
@@ -636,8 +644,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(cp, Operand(context_address));
__ LoadP(cp, MemOperand(cp));
- __ InitializeRootRegister();
-
// Push the function and the receiver onto the stack.
__ Push(r4, r5);
@@ -773,6 +779,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ beq(&fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
@@ -787,7 +796,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ CmpSmiLiteral(
optimized_code_entry,
Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
- __ Assert(eq, kExpectedOptimizationSentinel);
+ __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
__ b(&fallthrough);
}
@@ -868,11 +877,9 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ lbzx(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ addi(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ b(&load_size);
// Load the size of the current bytecode.
__ bind(&load_size);
-
__ ShiftLeftImm(scratch2, bytecode, Operand(2));
__ lwzx(scratch2, MemOperand(bytecode_size_table, scratch2));
__ add(bytecode_offset, bytecode_offset, scratch2);
@@ -941,10 +948,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, cr0);
+ __ Assert(ne,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ cr0);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Reset code age.
@@ -1226,10 +1236,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, cr0);
+ __ Assert(ne,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ cr0);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r4, no_reg,
BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Get the target bytecode offset from the frame.
@@ -1291,7 +1304,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+ __ Assert(ne, AbortReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
@@ -1867,8 +1880,9 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
} else {
__ Push(fp, r7, r4, r3);
}
- __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
+ __ Push(Smi::kZero); // Padding.
+ __ addi(fp, sp,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
}
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1877,8 +1891,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
- __ LoadP(r4, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize)));
+ __ LoadP(r4, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
int stack_adjustment = kPointerSize; // adjust for receiver
__ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment);
__ SmiToPtrArrayOffset(r0, r4);
@@ -1956,7 +1969,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ JumpIfSmi(r6, &new_target_not_constructor);
__ LoadP(scratch, FieldMemOperand(r6, HeapObject::kMapOffset));
__ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ TestBit(scratch, Map::kIsConstructor, r0);
+ __ TestBit(scratch, Map::IsConstructorBit::kShift, r0);
__ bne(&new_target_constructor, cr0);
__ bind(&new_target_not_constructor);
{
@@ -2253,7 +2266,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target has a [[Call]] internal method.
__ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
- __ TestBit(r7, Map::kIsCallable, r0);
+ __ TestBit(r7, Map::IsCallableBit::kShift, r0);
__ beq(&non_callable, cr0);
// Check if target is a proxy and call CallProxy external builtin
@@ -2349,7 +2362,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Check if target has a [[Construct]] internal method.
__ lbz(r5, FieldMemOperand(r7, Map::kBitFieldOffset));
- __ TestBit(r5, Map::kIsConstructor, r0);
+ __ TestBit(r5, Map::IsConstructorBit::kShift, r0);
__ beq(&non_constructor, cr0);
// Only dispatch to bound functions after checking whether they are
@@ -2419,17 +2432,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r4 : message as String object
- // -- lr : return address
- // -----------------------------------
- __ push(r4);
- __ LoadSmiLiteral(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbortJS);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : actual number of arguments
@@ -2524,8 +2526,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
__ sub(r7, fp, r7);
// Adjust for frame.
- __ subi(r7, r7, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
- 2 * kPointerSize));
+ __ subi(r7, r7,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize));
Label fill;
__ bind(&fill);
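The MaybeTailCallOptimizedCodeSlot hunks in each port above add a kLogFirstExecution entry ahead of the existing kCompileOptimized case; stripped of macro-assembler details, the pattern is a first-match dispatch from an optimization marker to a runtime function. Below is a sketch with placeholder names; only the kLogFirstExecution / Runtime::kFunctionFirstExecution and kCompileOptimized / Runtime::kCompileOptimized_NotConcurrent pairs are taken from the visible diff lines, and the real chain has further cases elided here.

// Hedged sketch of the marker-to-runtime dispatch extended above.
// Enum and function names are placeholders, not V8 declarations.
#include <cstdio>

enum class MarkerSketch {
  kNone,
  kLogFirstExecution,
  kCompileOptimized,
  kInOptimizationQueue
};

void TailCallRuntimeSketch(const char* runtime_function) {
  printf("tail-calling %s\n", runtime_function);
}

void DispatchSketch(MarkerSketch marker) {
  if (marker == MarkerSketch::kNone) return;  // fall through to the normal path
  if (marker == MarkerSketch::kLogFirstExecution)  // the newly inserted case
    return TailCallRuntimeSketch("Runtime::kFunctionFirstExecution");
  if (marker == MarkerSketch::kCompileOptimized)
    return TailCallRuntimeSketch("Runtime::kCompileOptimized_NotConcurrent");
  // Remaining markers (e.g. kInOptimizationQueue) are asserted under debug code.
}

int main() {
  DispatchSketch(MarkerSketch::kLogFirstExecution);
  return 0;
}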
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 42c478bd42..020b04b91d 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -109,9 +109,10 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin InternalArray functions should be maps.
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
__ TestIfSmi(r4);
- __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, cr0);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
+ cr0);
__ CompareObjectType(r4, r5, r6, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
@@ -136,9 +137,9 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// Initial map for the builtin Array functions should be maps.
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
__ TestIfSmi(r4);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r4, r5, r6, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
__ LoadRR(r5, r3);
@@ -272,13 +273,16 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ SmiTag(r2);
- __ Push(cp, r2, r3, r5);
+ __ Push(cp, r2, r3);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ Push(r5);
// ----------- S t a t e -------------
// -- sp[0*kPointerSize]: new target
- // -- r3 and sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments (tagged)
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- r3 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
@@ -300,10 +304,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- r2: receiver
- // -- Slot 3 / sp[0*kPointerSize]: new target
- // -- Slot 2 / sp[1*kPointerSize]: constructor function
- // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[3*kPointerSize]: context
+ // -- Slot 4 / sp[0*kPointerSize]: new target
+ // -- Slot 3 / sp[1*kPointerSize]: padding
+ // -- Slot 2 / sp[2*kPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -321,9 +326,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- r5: new target
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
// Restore constructor function and argument count.
@@ -343,9 +349,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- cr0: condition indicating whether r2 is zero
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- r3 and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- r3 and sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
__ beq(&no_args);
@@ -366,9 +373,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- r0: constructor result
// -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: number of arguments
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -540,7 +548,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kMissingBytecodeArray);
+ __ Assert(eq, AbortReason::kMissingBytecodeArray);
}
// Resume (Ignition/TurboFan) generator object.
@@ -632,8 +640,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(cp, Operand(context_address));
__ LoadP(cp, MemOperand(cp));
- __ InitializeRootRegister();
-
// Push the function and the receiver onto the stack.
__ Push(r3, r4);
@@ -776,6 +782,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ beq(&fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
@@ -790,7 +799,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ CmpSmiLiteral(
optimized_code_entry,
Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
- __ Assert(eq, kExpectedOptimizationSentinel);
+ __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
__ b(&fallthrough, Label::kNear);
}
@@ -870,10 +879,9 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ LoadlB(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ AddP(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ b(&load_size);
+
// Load the size of the current bytecode.
__ bind(&load_size);
-
__ ShiftLeftP(scratch2, bytecode, Operand(2));
__ LoadlW(scratch2, MemOperand(bytecode_size_table, scratch2));
__ AddP(bytecode_offset, bytecode_offset, scratch2);
@@ -938,10 +946,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ TestIfSmi(kInterpreterBytecodeArrayRegister);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r2, no_reg,
BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Reset code age.
@@ -1224,10 +1234,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ TestIfSmi(kInterpreterBytecodeArrayRegister);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
BYTECODE_ARRAY_TYPE);
- __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Get the target bytecode offset from the frame.
@@ -1289,7 +1301,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+ __ Assert(ne, AbortReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
@@ -1854,7 +1866,8 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
// Old FP <--- New FP
// Argument Adapter SMI
// Function
- // ArgC as SMI <--- New SP
+ // ArgC as SMI
+ // Padding <--- New SP
__ lay(sp, MemOperand(sp, -5 * kPointerSize));
// Cleanse the top nibble of 31-bit pointers.
@@ -1864,8 +1877,9 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ StoreP(r6, MemOperand(sp, 2 * kPointerSize));
__ StoreP(r3, MemOperand(sp, 1 * kPointerSize));
__ StoreP(r2, MemOperand(sp, 0 * kPointerSize));
- __ la(fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
+ __ Push(Smi::kZero); // Padding.
+ __ la(fp,
+ MemOperand(sp, ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
}
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1874,8 +1888,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
- __ LoadP(r3, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize)));
+ __ LoadP(r3, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
int stack_adjustment = kPointerSize; // adjust for receiver
__ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment);
__ SmiToPtrArrayOffset(r3, r3);
@@ -1954,7 +1967,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ JumpIfSmi(r5, &new_target_not_constructor);
__ LoadP(scratch, FieldMemOperand(r5, HeapObject::kMapOffset));
__ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tmll(scratch, Operand(Map::kIsConstructor));
+ __ tmll(scratch, Operand(Map::IsConstructorBit::kShift));
__ bne(&new_target_constructor);
__ bind(&new_target_not_constructor);
{
@@ -2252,7 +2265,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target has a [[Call]] internal method.
__ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r6, Map::kIsCallable);
+ __ TestBit(r6, Map::IsCallableBit::kShift);
__ beq(&non_callable);
// Check if target is a proxy and call CallProxy external builtin
@@ -2348,7 +2361,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Check if target has a [[Construct]] internal method.
__ LoadlB(r4, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r4, Map::kIsConstructor);
+ __ TestBit(r4, Map::IsConstructorBit::kShift);
__ beq(&non_constructor);
// Only dispatch to bound functions after checking whether they are
@@ -2418,17 +2431,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : message as String object
- // -- lr : return address
- // -----------------------------------
- __ push(r3);
- __ LoadSmiLiteral(cp, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbortJS);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : actual number of arguments
@@ -2522,8 +2524,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
__ SubP(r6, fp, r6);
// Adjust for frame.
- __ SubP(r6, r6, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
- 2 * kPointerSize));
+ __ SubP(r6, r6,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize));
Label fill;
__ bind(&fill);
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index b9073e1f13..5a09658867 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -107,7 +107,11 @@ Code* BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index,
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
CanonicalHandleScope canonical(isolate);
- Zone zone(isolate->allocator(), ZONE_NAME);
+
+ SegmentSize segment_size = isolate->serializer_enabled()
+ ? SegmentSize::kLarge
+ : SegmentSize::kDefault;
+ Zone zone(isolate->allocator(), ZONE_NAME, segment_size);
const int argc_with_recv =
(argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
compiler::CodeAssemblerState state(isolate, &zone, argc_with_recv,
@@ -127,7 +131,10 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
CanonicalHandleScope canonical(isolate);
- Zone zone(isolate->allocator(), ZONE_NAME);
+ SegmentSize segment_size = isolate->serializer_enabled()
+ ? SegmentSize::kLarge
+ : SegmentSize::kDefault;
+ Zone zone(isolate->allocator(), ZONE_NAME, segment_size);
// The interface descriptor with given key must be initialized at this point
// and this construction just queries the details from the descriptors table.
CallInterfaceDescriptor descriptor(isolate, interface_descriptor);
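The setup-builtins-internal.cc change just above sizes the compilation Zone with SegmentSize::kLarge when the serializer is enabled, presumably trading memory for fewer segment allocations during the one-shot builtin build. Below is a hedged, self-contained sketch of that kind of up-front arena sizing; the ArenaSketch type and the byte counts are invented, and only the kDefault/kLarge split mirrors the diff.

// Hypothetical arena whose segment size is fixed at construction time.
#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

enum class SegmentSizeSketch : size_t {
  kDefault = 32 * 1024,
  kLarge = 1024 * 1024
};

class ArenaSketch {
 public:
  explicit ArenaSketch(SegmentSizeSketch size)
      : segment_bytes_(static_cast<size_t>(size)) {}

  // Bump-pointer allocation; oversized requests are not handled (simplified).
  void* Allocate(size_t bytes) {
    if (offset_ + bytes > current_.size()) {
      retired_.push_back(std::move(current_));  // keep old segments alive
      current_.assign(segment_bytes_, 0);
      offset_ = 0;
    }
    void* result = current_.data() + offset_;
    offset_ += bytes;
    return result;
  }

 private:
  size_t segment_bytes_;
  size_t offset_ = 0;
  std::vector<char> current_;
  std::vector<std::vector<char>> retired_;
};

int main(int argc, char**) {
  bool serializer_enabled = argc > 1;  // stand-in for isolate->serializer_enabled()
  ArenaSketch zone(serializer_enabled ? SegmentSizeSketch::kLarge
                                      : SegmentSizeSketch::kDefault);
  zone.Allocate(128);
  printf("zone uses %s segments\n", serializer_enabled ? "large" : "default");
  return 0;
}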
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index f2820fa410..cd35abb362 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -170,7 +170,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Push(Operand(rbx, rcx, times_pointer_size, 0));
__ bind(&entry);
__ decp(rcx);
- __ j(greater_equal, &loop);
+ __ j(greater_equal, &loop, Label::kNear);
// Call the function.
// rax: number of arguments (untagged)
@@ -217,19 +217,21 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ Push(rsi);
__ Push(rcx);
__ Push(rdi);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
__ Push(rdx);
// ----------- S t a t e -------------
// -- sp[0*kPointerSize]: new target
- // -- rdi and sp[1*kPointerSize]: constructor function
- // -- sp[2*kPointerSize]: argument count
- // -- sp[3*kPointerSize]: context
+ // -- sp[1*kPointerSize]: padding
+ // -- rdi and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: argument count
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
__ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testl(FieldOperand(rbx, SharedFunctionInfo::kCompilerHintsOffset),
Immediate(SharedFunctionInfo::kDerivedConstructorMask));
- __ j(not_zero, &not_create_implicit_receiver);
+ __ j(not_zero, &not_create_implicit_receiver, Label::kNear);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
@@ -243,10 +245,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- rax implicit receiver
- // -- Slot 3 / sp[0*kPointerSize] new target
- // -- Slot 2 / sp[1*kPointerSize] constructor function
- // -- Slot 1 / sp[2*kPointerSize] number of arguments (tagged)
- // -- Slot 0 / sp[3*kPointerSize] context
+ // -- Slot 4 / sp[0*kPointerSize] new target
+ // -- Slot 3 / sp[1*kPointerSize] padding
+ // -- Slot 2 / sp[2*kPointerSize] constructor function
+ // -- Slot 1 / sp[3*kPointerSize] number of arguments (tagged)
+ // -- Slot 0 / sp[4*kPointerSize] context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -265,9 +268,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- sp[0*kPointerSize] implicit receiver
// -- sp[1*kPointerSize] implicit receiver
- // -- sp[2*kPointerSize] constructor function
- // -- sp[3*kPointerSize] number of arguments (tagged)
- // -- sp[4*kPointerSize] context
+ // -- sp[2*kPointerSize] padding
+ // -- sp[3*kPointerSize] constructor function
+ // -- sp[4*kPointerSize] number of arguments (tagged)
+ // -- sp[5*kPointerSize] context
// -----------------------------------
// Restore constructor function and argument count.
@@ -288,16 +292,17 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- rcx: counter (tagged)
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
- // -- rdi and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[2*kPointerSize]: padding
+ // -- rdi and sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
// -----------------------------------
__ jmp(&entry, Label::kNear);
__ bind(&loop);
__ Push(Operand(rbx, rcx, times_pointer_size, 0));
__ bind(&entry);
__ decp(rcx);
- __ j(greater_equal, &loop);
+ __ j(greater_equal, &loop, Label::kNear);
// Call the function.
ParameterCount actual(rax);
@@ -306,9 +311,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- rax constructor result
// -- sp[0*kPointerSize] implicit receiver
- // -- sp[1*kPointerSize] constructor function
- // -- sp[2*kPointerSize] number of arguments
- // -- sp[3*kPointerSize] context
+ // -- sp[1*kPointerSize] padding
+ // -- sp[2*kPointerSize] constructor function
+ // -- sp[3*kPointerSize] number of arguments
+ // -- sp[4*kPointerSize] context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -363,7 +369,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// on-stack receiver as the result.
__ bind(&use_receiver);
__ movp(rax, Operand(rsp, 0 * kPointerSize));
- __ JumpIfRoot(rax, Heap::kTheHoleValueRootIndex, &do_throw);
+ __ JumpIfRoot(rax, Heap::kTheHoleValueRootIndex, &do_throw, Label::kNear);
__ bind(&leave_frame);
// Restore the arguments count.
@@ -519,7 +525,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Argument count in rax. Clobbers rcx.
Label enough_stack_space, stack_overflow;
Generate_StackOverflowCheck(masm, rax, rcx, &stack_overflow, Label::kNear);
- __ jmp(&enough_stack_space);
+ __ jmp(&enough_stack_space, Label::kNear);
__ bind(&stack_overflow);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -540,7 +546,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ addp(rcx, Immediate(1));
__ bind(&entry);
__ cmpp(rcx, rax);
- __ j(not_equal, &loop);
+ __ j(not_equal, &loop, Label::kNear);
// Invoke the builtin code.
Handle<Code> builtin = is_construct
@@ -642,7 +648,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
__ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
- __ Assert(equal, kMissingBytecodeArray);
+ __ Assert(equal, AbortReason::kMissingBytecodeArray);
}
// Resume (Ignition/TurboFan) generator object.
@@ -768,6 +774,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ j(equal, &fallthrough);
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kCompileOptimized,
Runtime::kCompileOptimized_NotConcurrent);
TailCallRuntimeIfMarkerEquals(
@@ -781,7 +790,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
if (FLAG_debug_code) {
__ SmiCompare(optimized_code_entry,
Smi::FromEnum(OptimizationMarker::kInOptimizationQueue));
- __ Assert(equal, kExpectedOptimizationSentinel);
+ __ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
}
__ jmp(&fallthrough);
}
@@ -859,7 +868,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ movzxbp(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ addp(bytecode_size_table,
Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size, Label::kNear);
// Load the size of the current bytecode.
__ bind(&load_size);
@@ -922,7 +930,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
rax);
- __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ equal,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Reset code age.
@@ -958,7 +968,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Label loop_header;
Label loop_check;
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(always, &loop_check);
+ __ j(always, &loop_check, Label::kNear);
__ bind(&loop_header);
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
__ Push(rax);
@@ -1051,7 +1061,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// Push the arguments.
Label loop_header, loop_check;
- __ j(always, &loop_check);
+ __ j(always, &loop_check, Label::kNear);
__ bind(&loop_header);
__ Push(Operand(start_address, 0));
__ subp(start_address, Immediate(kPointerSize));
@@ -1212,7 +1222,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
rbx);
- __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(
+ equal,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Get the target bytecode offset from the frame.
@@ -1274,7 +1286,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// The feedback vector must be defined.
if (FLAG_debug_code) {
__ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(not_equal, BailoutReason::kExpectedFeedbackVector);
+ __ Assert(not_equal, AbortReason::kExpectedFeedbackVector);
}
// Is there an optimization marker or optimized code in the feedback vector?
@@ -1806,9 +1818,10 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Will both indicate a nullptr and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Check(not_smi,
+ AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
__ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Check(equal, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
@@ -1835,9 +1848,9 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// Will both indicate a nullptr and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
+ __ Check(not_smi, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, kUnexpectedInitialMapForArrayFunction);
+ __ Check(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
__ movp(rdx, rdi);
@@ -1863,6 +1876,8 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
// arguments and the receiver.
__ Integer32ToSmi(r8, rax);
__ Push(r8);
+
+ __ Push(Immediate(0)); // Padding.
}
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1922,19 +1937,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rdx : message as String object
- // -- rsp[0] : return address
- // -----------------------------------
- __ PopReturnAddressTo(rcx);
- __ Push(rdx);
- __ PushReturnAddressFrom(rcx);
- __ Move(rsi, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbortJS);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : actual number of arguments
@@ -2115,7 +2117,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ JumpIfSmi(rdx, &new_target_not_constructor, Label::kNear);
__ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
+ Immediate(Map::IsConstructorBit::kMask));
__ j(not_zero, &new_target_constructor, Label::kNear);
__ bind(&new_target_not_constructor);
{
@@ -2242,7 +2244,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Patch receiver to global proxy.
__ LoadGlobalProxy(rcx);
}
- __ jmp(&convert_receiver);
+ __ jmp(&convert_receiver, Label::kNear);
}
__ bind(&convert_to_object);
{
@@ -2419,12 +2421,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target has a [[Call]] internal method.
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsCallable));
- __ j(zero, &non_callable);
+ Immediate(Map::IsCallableBit::kMask));
+ __ j(zero, &non_callable, Label::kNear);
// Check if target is a proxy and call CallProxy external builtin
__ CmpInstanceType(rcx, JS_PROXY_TYPE);
- __ j(not_equal, &non_function);
+ __ j(not_equal, &non_function, Label::kNear);
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);
// 2. Call to something else, which might have a [[Call]] internal method (if
@@ -2516,7 +2518,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Check if target has a [[Construct]] internal method.
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
+ Immediate(Map::IsConstructorBit::kMask));
__ j(zero, &non_constructor, Label::kNear);
// Only dispatch to bound functions after checking whether they are
@@ -2527,7 +2529,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Only dispatch to proxies after checking whether they are constructors.
__ CmpInstanceType(rcx, JS_PROXY_TYPE);
- __ j(not_equal, &non_proxy);
+ __ j(not_equal, &non_proxy, Label::kNear);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
RelocInfo::CODE_TARGET);
@@ -2568,7 +2570,7 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
Label skip;
// If the code object is null, just return to the caller.
- __ cmpp(rax, Immediate(0));
+ __ testp(rax, rax);
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
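
A few patterns recur throughout the builtins-x64.cc hunks above: jumps to nearby targets gain an explicit Label::kNear hint so the assembler can use the short rel8 encoding, the zero check switches from cmpp(rax, Immediate(0)) to the usual testp(rax, rax) idiom (same flags, no immediate byte), and hand-written flag tests such as 1 << Map::kIsConstructor give way to bit-field constants like Map::IsConstructorBit::kMask. A minimal sketch of where such a kMask comes from, assuming a conventional shift/size bit-field helper (V8's own BitField template differs in detail):

#include <cstdint>

// Sketch of a shift/size bit field; kShift and kMask are what the hunks use.
template <int kFieldShift, int kFieldSize>
struct BitFieldSketch {
  static constexpr uint32_t kShift = kFieldShift;
  static constexpr uint32_t kMask = ((uint32_t{1} << kFieldSize) - 1)
                                    << kFieldShift;
  static constexpr uint32_t decode(uint32_t word) {
    return (word & kMask) >> kShift;
  }
};

// A one-bit flag would then be an alias such as (shift value made up here):
using IsConstructorBitSketch = BitFieldSketch</*shift=*/3, /*size=*/1>;
static_assert(IsConstructorBitSketch::kMask == 1u << 3,
              "the mask is just the shifted bit");
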
diff --git a/deps/v8/src/cached-powers.cc b/deps/v8/src/cached-powers.cc
index b160c11bed..08c9781414 100644
--- a/deps/v8/src/cached-powers.cc
+++ b/deps/v8/src/cached-powers.cc
@@ -22,93 +22,93 @@ struct CachedPower {
};
static const CachedPower kCachedPowers[] = {
- {V8_2PART_UINT64_C(0xfa8fd5a0, 081c0288), -1220, -348},
- {V8_2PART_UINT64_C(0xbaaee17f, a23ebf76), -1193, -340},
- {V8_2PART_UINT64_C(0x8b16fb20, 3055ac76), -1166, -332},
- {V8_2PART_UINT64_C(0xcf42894a, 5dce35ea), -1140, -324},
- {V8_2PART_UINT64_C(0x9a6bb0aa, 55653b2d), -1113, -316},
- {V8_2PART_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
- {V8_2PART_UINT64_C(0xab70fe17, c79ac6ca), -1060, -300},
- {V8_2PART_UINT64_C(0xff77b1fc, bebcdc4f), -1034, -292},
- {V8_2PART_UINT64_C(0xbe5691ef, 416bd60c), -1007, -284},
- {V8_2PART_UINT64_C(0x8dd01fad, 907ffc3c), -980, -276},
- {V8_2PART_UINT64_C(0xd3515c28, 31559a83), -954, -268},
- {V8_2PART_UINT64_C(0x9d71ac8f, ada6c9b5), -927, -260},
- {V8_2PART_UINT64_C(0xea9c2277, 23ee8bcb), -901, -252},
- {V8_2PART_UINT64_C(0xaecc4991, 4078536d), -874, -244},
- {V8_2PART_UINT64_C(0x823c1279, 5db6ce57), -847, -236},
- {V8_2PART_UINT64_C(0xc2109436, 4dfb5637), -821, -228},
- {V8_2PART_UINT64_C(0x9096ea6f, 3848984f), -794, -220},
- {V8_2PART_UINT64_C(0xd77485cb, 25823ac7), -768, -212},
- {V8_2PART_UINT64_C(0xa086cfcd, 97bf97f4), -741, -204},
- {V8_2PART_UINT64_C(0xef340a98, 172aace5), -715, -196},
- {V8_2PART_UINT64_C(0xb23867fb, 2a35b28e), -688, -188},
- {V8_2PART_UINT64_C(0x84c8d4df, d2c63f3b), -661, -180},
- {V8_2PART_UINT64_C(0xc5dd4427, 1ad3cdba), -635, -172},
- {V8_2PART_UINT64_C(0x936b9fce, bb25c996), -608, -164},
- {V8_2PART_UINT64_C(0xdbac6c24, 7d62a584), -582, -156},
- {V8_2PART_UINT64_C(0xa3ab6658, 0d5fdaf6), -555, -148},
- {V8_2PART_UINT64_C(0xf3e2f893, dec3f126), -529, -140},
- {V8_2PART_UINT64_C(0xb5b5ada8, aaff80b8), -502, -132},
- {V8_2PART_UINT64_C(0x87625f05, 6c7c4a8b), -475, -124},
- {V8_2PART_UINT64_C(0xc9bcff60, 34c13053), -449, -116},
- {V8_2PART_UINT64_C(0x964e858c, 91ba2655), -422, -108},
- {V8_2PART_UINT64_C(0xdff97724, 70297ebd), -396, -100},
- {V8_2PART_UINT64_C(0xa6dfbd9f, b8e5b88f), -369, -92},
- {V8_2PART_UINT64_C(0xf8a95fcf, 88747d94), -343, -84},
- {V8_2PART_UINT64_C(0xb9447093, 8fa89bcf), -316, -76},
- {V8_2PART_UINT64_C(0x8a08f0f8, bf0f156b), -289, -68},
- {V8_2PART_UINT64_C(0xcdb02555, 653131b6), -263, -60},
- {V8_2PART_UINT64_C(0x993fe2c6, d07b7fac), -236, -52},
- {V8_2PART_UINT64_C(0xe45c10c4, 2a2b3b06), -210, -44},
- {V8_2PART_UINT64_C(0xaa242499, 697392d3), -183, -36},
- {V8_2PART_UINT64_C(0xfd87b5f2, 8300ca0e), -157, -28},
- {V8_2PART_UINT64_C(0xbce50864, 92111aeb), -130, -20},
- {V8_2PART_UINT64_C(0x8cbccc09, 6f5088cc), -103, -12},
- {V8_2PART_UINT64_C(0xd1b71758, e219652c), -77, -4},
- {V8_2PART_UINT64_C(0x9c400000, 00000000), -50, 4},
- {V8_2PART_UINT64_C(0xe8d4a510, 00000000), -24, 12},
- {V8_2PART_UINT64_C(0xad78ebc5, ac620000), 3, 20},
- {V8_2PART_UINT64_C(0x813f3978, f8940984), 30, 28},
- {V8_2PART_UINT64_C(0xc097ce7b, c90715b3), 56, 36},
- {V8_2PART_UINT64_C(0x8f7e32ce, 7bea5c70), 83, 44},
- {V8_2PART_UINT64_C(0xd5d238a4, abe98068), 109, 52},
- {V8_2PART_UINT64_C(0x9f4f2726, 179a2245), 136, 60},
- {V8_2PART_UINT64_C(0xed63a231, d4c4fb27), 162, 68},
- {V8_2PART_UINT64_C(0xb0de6538, 8cc8ada8), 189, 76},
- {V8_2PART_UINT64_C(0x83c7088e, 1aab65db), 216, 84},
- {V8_2PART_UINT64_C(0xc45d1df9, 42711d9a), 242, 92},
- {V8_2PART_UINT64_C(0x924d692c, a61be758), 269, 100},
- {V8_2PART_UINT64_C(0xda01ee64, 1a708dea), 295, 108},
- {V8_2PART_UINT64_C(0xa26da399, 9aef774a), 322, 116},
- {V8_2PART_UINT64_C(0xf209787b, b47d6b85), 348, 124},
- {V8_2PART_UINT64_C(0xb454e4a1, 79dd1877), 375, 132},
- {V8_2PART_UINT64_C(0x865b8692, 5b9bc5c2), 402, 140},
- {V8_2PART_UINT64_C(0xc83553c5, c8965d3d), 428, 148},
- {V8_2PART_UINT64_C(0x952ab45c, fa97a0b3), 455, 156},
- {V8_2PART_UINT64_C(0xde469fbd, 99a05fe3), 481, 164},
- {V8_2PART_UINT64_C(0xa59bc234, db398c25), 508, 172},
- {V8_2PART_UINT64_C(0xf6c69a72, a3989f5c), 534, 180},
- {V8_2PART_UINT64_C(0xb7dcbf53, 54e9bece), 561, 188},
- {V8_2PART_UINT64_C(0x88fcf317, f22241e2), 588, 196},
- {V8_2PART_UINT64_C(0xcc20ce9b, d35c78a5), 614, 204},
- {V8_2PART_UINT64_C(0x98165af3, 7b2153df), 641, 212},
- {V8_2PART_UINT64_C(0xe2a0b5dc, 971f303a), 667, 220},
- {V8_2PART_UINT64_C(0xa8d9d153, 5ce3b396), 694, 228},
- {V8_2PART_UINT64_C(0xfb9b7cd9, a4a7443c), 720, 236},
- {V8_2PART_UINT64_C(0xbb764c4c, a7a44410), 747, 244},
- {V8_2PART_UINT64_C(0x8bab8eef, b6409c1a), 774, 252},
- {V8_2PART_UINT64_C(0xd01fef10, a657842c), 800, 260},
- {V8_2PART_UINT64_C(0x9b10a4e5, e9913129), 827, 268},
- {V8_2PART_UINT64_C(0xe7109bfb, a19c0c9d), 853, 276},
- {V8_2PART_UINT64_C(0xac2820d9, 623bf429), 880, 284},
- {V8_2PART_UINT64_C(0x80444b5e, 7aa7cf85), 907, 292},
- {V8_2PART_UINT64_C(0xbf21e440, 03acdd2d), 933, 300},
- {V8_2PART_UINT64_C(0x8e679c2f, 5e44ff8f), 960, 308},
- {V8_2PART_UINT64_C(0xd433179d, 9c8cb841), 986, 316},
- {V8_2PART_UINT64_C(0x9e19db92, b4e31ba9), 1013, 324},
- {V8_2PART_UINT64_C(0xeb96bf6e, badf77d9), 1039, 332},
- {V8_2PART_UINT64_C(0xaf87023b, 9bf0ee6b), 1066, 340},
+ {V8_2PART_UINT64_C(0xFA8FD5A0, 081C0288), -1220, -348},
+ {V8_2PART_UINT64_C(0xBAAEE17F, A23EBF76), -1193, -340},
+ {V8_2PART_UINT64_C(0x8B16FB20, 3055AC76), -1166, -332},
+ {V8_2PART_UINT64_C(0xCF42894A, 5DCE35EA), -1140, -324},
+ {V8_2PART_UINT64_C(0x9A6BB0AA, 55653B2D), -1113, -316},
+ {V8_2PART_UINT64_C(0xE61ACF03, 3D1A45DF), -1087, -308},
+ {V8_2PART_UINT64_C(0xAB70FE17, C79AC6CA), -1060, -300},
+ {V8_2PART_UINT64_C(0xFF77B1FC, BEBCDC4F), -1034, -292},
+ {V8_2PART_UINT64_C(0xBE5691EF, 416BD60C), -1007, -284},
+ {V8_2PART_UINT64_C(0x8DD01FAD, 907FFC3C), -980, -276},
+ {V8_2PART_UINT64_C(0xD3515C28, 31559A83), -954, -268},
+ {V8_2PART_UINT64_C(0x9D71AC8F, ADA6C9B5), -927, -260},
+ {V8_2PART_UINT64_C(0xEA9C2277, 23EE8BCB), -901, -252},
+ {V8_2PART_UINT64_C(0xAECC4991, 4078536D), -874, -244},
+ {V8_2PART_UINT64_C(0x823C1279, 5DB6CE57), -847, -236},
+ {V8_2PART_UINT64_C(0xC2109436, 4DFB5637), -821, -228},
+ {V8_2PART_UINT64_C(0x9096EA6F, 3848984F), -794, -220},
+ {V8_2PART_UINT64_C(0xD77485CB, 25823AC7), -768, -212},
+ {V8_2PART_UINT64_C(0xA086CFCD, 97BF97F4), -741, -204},
+ {V8_2PART_UINT64_C(0xEF340A98, 172AACE5), -715, -196},
+ {V8_2PART_UINT64_C(0xB23867FB, 2A35B28E), -688, -188},
+ {V8_2PART_UINT64_C(0x84C8D4DF, D2C63F3B), -661, -180},
+ {V8_2PART_UINT64_C(0xC5DD4427, 1AD3CDBA), -635, -172},
+ {V8_2PART_UINT64_C(0x936B9FCE, BB25C996), -608, -164},
+ {V8_2PART_UINT64_C(0xDBAC6C24, 7D62A584), -582, -156},
+ {V8_2PART_UINT64_C(0xA3AB6658, 0D5FDAF6), -555, -148},
+ {V8_2PART_UINT64_C(0xF3E2F893, DEC3F126), -529, -140},
+ {V8_2PART_UINT64_C(0xB5B5ADA8, AAFF80B8), -502, -132},
+ {V8_2PART_UINT64_C(0x87625F05, 6C7C4A8B), -475, -124},
+ {V8_2PART_UINT64_C(0xC9BCFF60, 34C13053), -449, -116},
+ {V8_2PART_UINT64_C(0x964E858C, 91BA2655), -422, -108},
+ {V8_2PART_UINT64_C(0xDFF97724, 70297EBD), -396, -100},
+ {V8_2PART_UINT64_C(0xA6DFBD9F, B8E5B88F), -369, -92},
+ {V8_2PART_UINT64_C(0xF8A95FCF, 88747D94), -343, -84},
+ {V8_2PART_UINT64_C(0xB9447093, 8FA89BCF), -316, -76},
+ {V8_2PART_UINT64_C(0x8A08F0F8, BF0F156B), -289, -68},
+ {V8_2PART_UINT64_C(0xCDB02555, 653131B6), -263, -60},
+ {V8_2PART_UINT64_C(0x993FE2C6, D07B7FAC), -236, -52},
+ {V8_2PART_UINT64_C(0xE45C10C4, 2A2B3B06), -210, -44},
+ {V8_2PART_UINT64_C(0xAA242499, 697392D3), -183, -36},
+ {V8_2PART_UINT64_C(0xFD87B5F2, 8300CA0E), -157, -28},
+ {V8_2PART_UINT64_C(0xBCE50864, 92111AEB), -130, -20},
+ {V8_2PART_UINT64_C(0x8CBCCC09, 6F5088CC), -103, -12},
+ {V8_2PART_UINT64_C(0xD1B71758, E219652C), -77, -4},
+ {V8_2PART_UINT64_C(0x9C400000, 00000000), -50, 4},
+ {V8_2PART_UINT64_C(0xE8D4A510, 00000000), -24, 12},
+ {V8_2PART_UINT64_C(0xAD78EBC5, AC620000), 3, 20},
+ {V8_2PART_UINT64_C(0x813F3978, F8940984), 30, 28},
+ {V8_2PART_UINT64_C(0xC097CE7B, C90715B3), 56, 36},
+ {V8_2PART_UINT64_C(0x8F7E32CE, 7BEA5C70), 83, 44},
+ {V8_2PART_UINT64_C(0xD5D238A4, ABE98068), 109, 52},
+ {V8_2PART_UINT64_C(0x9F4F2726, 179A2245), 136, 60},
+ {V8_2PART_UINT64_C(0xED63A231, D4C4FB27), 162, 68},
+ {V8_2PART_UINT64_C(0xB0DE6538, 8CC8ADA8), 189, 76},
+ {V8_2PART_UINT64_C(0x83C7088E, 1AAB65DB), 216, 84},
+ {V8_2PART_UINT64_C(0xC45D1DF9, 42711D9A), 242, 92},
+ {V8_2PART_UINT64_C(0x924D692C, A61BE758), 269, 100},
+ {V8_2PART_UINT64_C(0xDA01EE64, 1A708DEA), 295, 108},
+ {V8_2PART_UINT64_C(0xA26DA399, 9AEF774A), 322, 116},
+ {V8_2PART_UINT64_C(0xF209787B, B47D6B85), 348, 124},
+ {V8_2PART_UINT64_C(0xB454E4A1, 79DD1877), 375, 132},
+ {V8_2PART_UINT64_C(0x865B8692, 5B9BC5C2), 402, 140},
+ {V8_2PART_UINT64_C(0xC83553C5, C8965D3D), 428, 148},
+ {V8_2PART_UINT64_C(0x952AB45C, FA97A0B3), 455, 156},
+ {V8_2PART_UINT64_C(0xDE469FBD, 99A05FE3), 481, 164},
+ {V8_2PART_UINT64_C(0xA59BC234, DB398C25), 508, 172},
+ {V8_2PART_UINT64_C(0xF6C69A72, A3989F5C), 534, 180},
+ {V8_2PART_UINT64_C(0xB7DCBF53, 54E9BECE), 561, 188},
+ {V8_2PART_UINT64_C(0x88FCF317, F22241E2), 588, 196},
+ {V8_2PART_UINT64_C(0xCC20CE9B, D35C78A5), 614, 204},
+ {V8_2PART_UINT64_C(0x98165AF3, 7B2153DF), 641, 212},
+ {V8_2PART_UINT64_C(0xE2A0B5DC, 971F303A), 667, 220},
+ {V8_2PART_UINT64_C(0xA8D9D153, 5CE3B396), 694, 228},
+ {V8_2PART_UINT64_C(0xFB9B7CD9, A4A7443C), 720, 236},
+ {V8_2PART_UINT64_C(0xBB764C4C, A7A44410), 747, 244},
+ {V8_2PART_UINT64_C(0x8BAB8EEF, B6409C1A), 774, 252},
+ {V8_2PART_UINT64_C(0xD01FEF10, A657842C), 800, 260},
+ {V8_2PART_UINT64_C(0x9B10A4E5, E9913129), 827, 268},
+ {V8_2PART_UINT64_C(0xE7109BFB, A19C0C9D), 853, 276},
+ {V8_2PART_UINT64_C(0xAC2820D9, 623BF429), 880, 284},
+ {V8_2PART_UINT64_C(0x80444B5E, 7AA7CF85), 907, 292},
+ {V8_2PART_UINT64_C(0xBF21E440, 03ACDD2D), 933, 300},
+ {V8_2PART_UINT64_C(0x8E679C2F, 5E44FF8F), 960, 308},
+ {V8_2PART_UINT64_C(0xD433179D, 9C8CB841), 986, 316},
+ {V8_2PART_UINT64_C(0x9E19DB92, B4E31BA9), 1013, 324},
+ {V8_2PART_UINT64_C(0xEB96BF6E, BADF77D9), 1039, 332},
+ {V8_2PART_UINT64_C(0xAF87023B, 9BF0EE6B), 1066, 340},
};
#ifdef DEBUG
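
The cached-powers hunk only changes the casing of the hexadecimal digits; the significands and exponents themselves are untouched. Presumably V8_2PART_UINT64_C just glues the two 32-bit halves into one 64-bit significand, along these lines (a sketch, not the real macro):

#include <cstdint>

constexpr uint64_t TwoPartUint64(uint32_t hi, uint32_t lo) {
  return (uint64_t{hi} << 32) | lo;  // high half in the upper 32 bits
}

// First table entry as an example; same value in either casing.
static_assert(TwoPartUint64(0xFA8FD5A0, 0x081C0288) == 0xFA8FD5A0081C0288ull,
              "uppercase and lowercase hex digits denote the same number");
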
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index 245f2334f6..e5b72b6fab 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -30,16 +30,6 @@ Handle<Code> CodeFactory::RuntimeCEntry(Isolate* isolate, int result_size) {
}
// static
-Callable CodeFactory::LoadICProtoArray(Isolate* isolate,
- bool throw_if_nonexistent) {
- return Callable(
- throw_if_nonexistent
- ? BUILTIN_CODE(isolate, LoadICProtoArrayThrowIfNonexistent)
- : BUILTIN_CODE(isolate, LoadICProtoArray),
- LoadICProtoArrayDescriptor(isolate));
-}
-
-// static
Callable CodeFactory::ApiGetter(Isolate* isolate) {
CallApiGetterStub stub(isolate);
return make_callable(stub);
@@ -84,22 +74,6 @@ Callable CodeFactory::StoreOwnICInOptimizedCode(Isolate* isolate) {
}
// static
-Callable CodeFactory::StoreGlobalIC(Isolate* isolate,
- LanguageMode language_mode) {
- // TODO(ishell): Use StoreGlobalIC[Strict]Trampoline when it's ready.
- return Callable(BUILTIN_CODE(isolate, StoreICTrampoline),
- StoreDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::StoreGlobalICInOptimizedCode(Isolate* isolate,
- LanguageMode language_mode) {
- // TODO(ishell): Use StoreGlobalIC[Strict] when it's ready.
- return Callable(BUILTIN_CODE(isolate, StoreIC),
- StoreWithVectorDescriptor(isolate));
-}
-
-// static
Callable CodeFactory::BinaryOperation(Isolate* isolate, Operation op) {
switch (op) {
case Operation::kShiftRight:
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index d85ca5f073..079f16899a 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -24,13 +24,9 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Handle<Code> RuntimeCEntry(Isolate* isolate, int result_size = 1);
// Initial states for ICs.
- static Callable LoadICProtoArray(Isolate* isolate, bool throw_if_nonexistent);
static Callable LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode);
static Callable LoadGlobalICInOptimizedCode(Isolate* isolate,
TypeofMode typeof_mode);
- static Callable StoreGlobalIC(Isolate* isolate, LanguageMode mode);
- static Callable StoreGlobalICInOptimizedCode(Isolate* isolate,
- LanguageMode mode);
static Callable StoreOwnIC(Isolate* isolate);
static Callable StoreOwnICInOptimizedCode(Isolate* isolate);
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
index e36a5cc796..f98e7fe519 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -175,7 +175,9 @@ Node* CodeStubAssembler::SelectSmiConstant(Node* condition, Smi* true_value,
MachineRepresentation::kTaggedSigned);
}
-Node* CodeStubAssembler::NoContextConstant() { return SmiConstant(0); }
+Node* CodeStubAssembler::NoContextConstant() {
+ return SmiConstant(Context::kNoContext);
+}
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
compiler::TNode<std::remove_reference<decltype( \
@@ -546,8 +548,8 @@ TNode<Object> CodeStubAssembler::NumberMax(SloppyTNode<Object> a,
// TODO(danno): This could be optimized by specifically handling smi cases.
VARIABLE(result, MachineRepresentation::kTagged);
Label done(this), greater_than_equal_a(this), greater_than_equal_b(this);
- GotoIfNumericGreaterThanOrEqual(a, b, &greater_than_equal_a);
- GotoIfNumericGreaterThanOrEqual(b, a, &greater_than_equal_b);
+ GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a);
+ GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b);
result.Bind(NanConstant());
Goto(&done);
BIND(&greater_than_equal_a);
@@ -565,8 +567,8 @@ TNode<Object> CodeStubAssembler::NumberMin(SloppyTNode<Object> a,
// TODO(danno): This could be optimized by specifically handling smi cases.
VARIABLE(result, MachineRepresentation::kTagged);
Label done(this), greater_than_equal_a(this), greater_than_equal_b(this);
- GotoIfNumericGreaterThanOrEqual(a, b, &greater_than_equal_a);
- GotoIfNumericGreaterThanOrEqual(b, a, &greater_than_equal_b);
+ GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a);
+ GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b);
result.Bind(NanConstant());
Goto(&done);
BIND(&greater_than_equal_a);
@@ -642,8 +644,9 @@ Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
return TNode<Object>::UncheckedCast(var_result.value());
}
-Node* CodeStubAssembler::SmiMul(Node* a, Node* b) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
+TNode<Number> CodeStubAssembler::SmiMul(SloppyTNode<Smi> a,
+ SloppyTNode<Smi> b) {
+ TVARIABLE(Number, var_result);
VARIABLE(var_lhs_float64, MachineRepresentation::kFloat64);
VARIABLE(var_rhs_float64, MachineRepresentation::kFloat64);
Label return_result(this, &var_result);
@@ -668,7 +671,7 @@ Node* CodeStubAssembler::SmiMul(Node* a, Node* b) {
Branch(Word32Equal(answer, zero), &answer_zero, &answer_not_zero);
BIND(&answer_not_zero);
{
- var_result.Bind(ChangeInt32ToTagged(answer));
+ var_result = ChangeInt32ToTagged(answer);
Goto(&return_result);
}
BIND(&answer_zero);
@@ -679,12 +682,12 @@ Node* CodeStubAssembler::SmiMul(Node* a, Node* b) {
&if_should_be_zero);
BIND(&if_should_be_negative_zero);
{
- var_result.Bind(MinusZeroConstant());
+ var_result = MinusZeroConstant();
Goto(&return_result);
}
BIND(&if_should_be_zero);
{
- var_result.Bind(SmiConstant(0));
+ var_result = SmiConstant(0);
Goto(&return_result);
}
}
@@ -694,13 +697,12 @@ Node* CodeStubAssembler::SmiMul(Node* a, Node* b) {
var_lhs_float64.Bind(SmiToFloat64(a));
var_rhs_float64.Bind(SmiToFloat64(b));
Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
- Node* result = AllocateHeapNumberWithValue(value);
- var_result.Bind(result);
+ var_result = AllocateHeapNumberWithValue(value);
Goto(&return_result);
}
BIND(&return_result);
- return var_result.value();
+ return var_result;
}
Node* CodeStubAssembler::TrySmiDiv(Node* dividend, Node* divisor,
@@ -898,7 +900,7 @@ void CodeStubAssembler::BranchIfFastJSArrayForCopy(Node* object, Node* context,
}
void CodeStubAssembler::GotoIfForceSlowPath(Label* if_true) {
-#if defined(DEBUG) || defined(ENABLE_FASTSLOW_SWITCH)
+#ifdef V8_ENABLE_FORCE_SLOW_PATH
Node* const force_slow_path_addr =
ExternalConstant(ExternalReference::force_slow_path(isolate()));
Node* const force_slow = Load(MachineType::Uint8(), force_slow_path_addr);
@@ -1534,14 +1536,15 @@ Node* CodeStubAssembler::LoadJSValueValue(Node* object) {
return LoadObjectField(object, JSValue::kValueOffset);
}
-Node* CodeStubAssembler::LoadWeakCellValueUnchecked(Node* weak_cell) {
+TNode<Object> CodeStubAssembler::LoadWeakCellValueUnchecked(Node* weak_cell) {
// TODO(ishell): fix callers.
return LoadObjectField(weak_cell, WeakCell::kValueOffset);
}
-Node* CodeStubAssembler::LoadWeakCellValue(Node* weak_cell, Label* if_cleared) {
+TNode<Object> CodeStubAssembler::LoadWeakCellValue(
+ SloppyTNode<WeakCell> weak_cell, Label* if_cleared) {
CSA_ASSERT(this, IsWeakCell(weak_cell));
- Node* value = LoadWeakCellValueUnchecked(weak_cell);
+ TNode<Object> value = LoadWeakCellValueUnchecked(weak_cell);
if (if_cleared != nullptr) {
GotoIf(WordEqual(value, IntPtrConstant(0)), if_cleared);
}
@@ -1624,17 +1627,16 @@ Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
}
}
-Node* CodeStubAssembler::LoadFeedbackVectorSlot(Node* object,
- Node* slot_index_node,
- int additional_offset,
- ParameterMode parameter_mode) {
+TNode<Object> CodeStubAssembler::LoadFeedbackVectorSlot(
+ Node* object, Node* slot_index_node, int additional_offset,
+ ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(this, IsFeedbackVector(object));
CSA_SLOW_ASSERT(this, MatchesParameterMode(slot_index_node, parameter_mode));
int32_t header_size =
FeedbackVector::kFeedbackSlotsOffset + additional_offset - kHeapObjectTag;
Node* offset = ElementOffsetFromIndex(slot_index_node, HOLEY_ELEMENTS,
parameter_mode, header_size);
- return Load(MachineType::AnyTagged(), object, offset);
+ return UncheckedCast<Object>(Load(MachineType::AnyTagged(), object, offset));
}
Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
@@ -1788,8 +1790,8 @@ Node* CodeStubAssembler::LoadJSFunctionPrototype(Node* function,
CSA_ASSERT(this, TaggedIsNotSmi(function));
CSA_ASSERT(this, IsJSFunction(function));
CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(function)));
- CSA_ASSERT(this, IsClearWord32(LoadMapBitField(LoadMap(function)),
- 1 << Map::kHasNonInstancePrototype));
+ CSA_ASSERT(this, IsClearWord32<Map::HasNonInstancePrototypeBit>(
+ LoadMapBitField(LoadMap(function))));
Node* proto_or_map =
LoadObjectField(function, JSFunction::kPrototypeOrInitialMapOffset);
GotoIf(IsTheHole(proto_or_map), if_bailout);
@@ -1943,10 +1945,10 @@ Node* CodeStubAssembler::EnsureArrayPushable(Node* receiver, Label* bailout) {
Comment("Disallow pushing onto prototypes");
Node* map = LoadMap(receiver);
Node* bit_field2 = LoadMapBitField2(map);
- int mask = static_cast<int>(Map::IsPrototypeMapBits::kMask) |
- (1 << Map::kIsExtensible);
+ int mask = Map::IsPrototypeMapBit::kMask | Map::IsExtensibleBit::kMask;
Node* test = Word32And(bit_field2, Int32Constant(mask));
- GotoIf(Word32NotEqual(test, Int32Constant(1 << Map::kIsExtensible)), bailout);
+ GotoIf(Word32NotEqual(test, Int32Constant(Map::IsExtensibleBit::kMask)),
+ bailout);
// Disallow pushing onto arrays in dictionary named property mode. We need
// to figure out whether the length property is still writable.
@@ -1994,7 +1996,10 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
// Resize the capacity of the fixed array if it doesn't fit.
TNode<IntPtrT> first = *arg_index;
- Node* growth = WordToParameter(IntPtrSub(args->GetLength(), first), mode);
+ Node* growth = WordToParameter(
+ IntPtrSub(UncheckedCast<IntPtrT>(args->GetLength(INTPTR_PARAMETERS)),
+ first),
+ mode);
PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(),
&var_elements, growth, &pre_bailout);
@@ -2548,8 +2553,8 @@ void CodeStubAssembler::InitializeJSObjectFromMap(
void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
Node* object, Node* map, Node* instance_size, int start_offset) {
STATIC_ASSERT(Map::kNoSlackTracking == 0);
- CSA_ASSERT(this,
- IsClearWord32<Map::ConstructionCounter>(LoadMapBitField3(map)));
+ CSA_ASSERT(
+ this, IsClearWord32<Map::ConstructionCounterBits>(LoadMapBitField3(map)));
InitializeFieldsWithRoot(object, IntPtrConstant(start_offset), instance_size,
Heap::kUndefinedValueRootIndex);
}
@@ -2564,7 +2569,8 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
Node* bit_field3 = LoadMapBitField3(map);
Label end(this), slack_tracking(this), complete(this, Label::kDeferred);
STATIC_ASSERT(Map::kNoSlackTracking == 0);
- GotoIf(IsSetWord32<Map::ConstructionCounter>(bit_field3), &slack_tracking);
+ GotoIf(IsSetWord32<Map::ConstructionCounterBits>(bit_field3),
+ &slack_tracking);
Comment("No slack tracking");
InitializeJSObjectBodyNoSlackTracking(object, map, instance_size);
Goto(&end);
@@ -2574,9 +2580,9 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
Comment("Decrease construction counter");
// Slack tracking is only done on initial maps.
CSA_ASSERT(this, IsUndefined(LoadMapBackPointer(map)));
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ STATIC_ASSERT(Map::ConstructionCounterBits::kNext == 32);
Node* new_bit_field3 = Int32Sub(
- bit_field3, Int32Constant(1 << Map::ConstructionCounter::kShift));
+ bit_field3, Int32Constant(1 << Map::ConstructionCounterBits::kShift));
StoreObjectFieldNoWriteBarrier(map, Map::kBitField3Offset, new_bit_field3,
MachineRepresentation::kWord32);
STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
@@ -2595,7 +2601,9 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
InitializeFieldsWithRoot(object, IntPtrConstant(start_offset), used_size,
Heap::kUndefinedValueRootIndex);
- GotoIf(IsClearWord32<Map::ConstructionCounter>(new_bit_field3), &complete);
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ GotoIf(IsClearWord32<Map::ConstructionCounterBits>(new_bit_field3),
+ &complete);
Goto(&end);
}
@@ -3346,7 +3354,8 @@ Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity,
CSA_SLOW_ASSERT(this, MatchesParameterMode(old_capacity, mode));
Node* half_old_capacity = WordOrSmiShr(old_capacity, 1, mode);
Node* new_capacity = IntPtrOrSmiAdd(half_old_capacity, old_capacity, mode);
- Node* padding = IntPtrOrSmiConstant(16, mode);
+ Node* padding =
+ IntPtrOrSmiConstant(JSObject::kMinAddedElementsCapacity, mode);
return IntPtrOrSmiAdd(new_capacity, padding, mode);
}
@@ -3505,8 +3514,8 @@ Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
VARIABLE(var_result, MachineRepresentation::kWord32);
Label done(this);
- TaggedToWord32OrBigIntImpl<Feedback::kNone, Object::Conversion::kToNumber>(
- context, value, &done, &var_result);
+ TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumber>(context, value,
+ &done, &var_result);
BIND(&done);
return var_result.value();
}
@@ -3518,7 +3527,7 @@ void CodeStubAssembler::TaggedToWord32OrBigInt(Node* context, Node* value,
Variable* var_word32,
Label* if_bigint,
Variable* var_bigint) {
- TaggedToWord32OrBigIntImpl<Feedback::kNone, Object::Conversion::kToNumeric>(
+ TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumeric>(
context, value, if_number, var_word32, if_bigint, var_bigint);
}
@@ -3528,13 +3537,12 @@ void CodeStubAssembler::TaggedToWord32OrBigInt(Node* context, Node* value,
void CodeStubAssembler::TaggedToWord32OrBigIntWithFeedback(
Node* context, Node* value, Label* if_number, Variable* var_word32,
Label* if_bigint, Variable* var_bigint, Variable* var_feedback) {
- TaggedToWord32OrBigIntImpl<Feedback::kCollect,
- Object::Conversion::kToNumeric>(
+ TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumeric>(
context, value, if_number, var_word32, if_bigint, var_bigint,
var_feedback);
}
-template <CodeStubAssembler::Feedback feedback, Object::Conversion conversion>
+template <Object::Conversion conversion>
void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
Node* context, Node* value, Label* if_number, Variable* var_word32,
Label* if_bigint, Variable* var_bigint, Variable* var_feedback) {
@@ -3546,14 +3554,10 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
// We might need to loop after conversion.
VARIABLE(var_value, MachineRepresentation::kTagged, value);
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNone));
- } else {
- DCHECK(var_feedback == nullptr);
- }
+ OverwriteFeedback(var_feedback, BinaryOperationFeedback::kNone);
Variable* loop_vars[] = {&var_value, var_feedback};
- int num_vars = feedback == Feedback::kCollect ? arraysize(loop_vars)
- : arraysize(loop_vars) - 1;
+ int num_vars =
+ var_feedback != nullptr ? arraysize(loop_vars) : arraysize(loop_vars) - 1;
Label loop(this, num_vars, loop_vars);
Goto(&loop);
BIND(&loop);
@@ -3565,11 +3569,7 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
// {value} is a Smi.
var_word32->Bind(SmiToWord32(value));
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(
- SmiOr(var_feedback->value(),
- SmiConstant(BinaryOperationFeedback::kSignedSmall)));
- }
+ CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
Goto(if_number);
BIND(&not_smi);
@@ -3582,7 +3582,7 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
// Not HeapNumber (or BigInt if conversion == kToNumeric).
{
- if (feedback == Feedback::kCollect) {
+ if (var_feedback != nullptr) {
// We do not require an Or with earlier feedback here because once we
// convert the value to a Numeric, we cannot reach this path. We can
// only reach this path on the first pass when the feedback is kNone.
@@ -3595,36 +3595,25 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
? Builtins::kNonNumberToNumeric
: Builtins::kNonNumberToNumber;
var_value.Bind(CallBuiltin(builtin, context, value));
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kAny));
- }
+ OverwriteFeedback(var_feedback, BinaryOperationFeedback::kAny);
Goto(&loop);
BIND(&is_oddball);
var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(
- SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- }
+ OverwriteFeedback(var_feedback,
+ BinaryOperationFeedback::kNumberOrOddball);
Goto(&loop);
}
BIND(&is_heap_number);
var_word32->Bind(TruncateHeapNumberValueToWord32(value));
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(SmiOr(var_feedback->value(),
- SmiConstant(BinaryOperationFeedback::kNumber)));
- }
+ CombineFeedback(var_feedback, BinaryOperationFeedback::kNumber);
Goto(if_number);
if (conversion == Object::Conversion::kToNumeric) {
BIND(&is_bigint);
var_bigint->Bind(value);
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(
- SmiOr(var_feedback->value(),
- SmiConstant(BinaryOperationFeedback::kBigInt)));
- }
+ CombineFeedback(var_feedback, BinaryOperationFeedback::kBigInt);
Goto(if_bigint);
}
}
@@ -3822,9 +3811,7 @@ TNode<Float64T> CodeStubAssembler::ChangeNumberToFloat64(
}
TNode<UintPtrT> CodeStubAssembler::ChangeNonnegativeNumberToUintPtr(
- SloppyTNode<Number> value) {
- // TODO(tebbi): Remove assert once argument is TNode instead of SloppyTNode.
- CSA_SLOW_ASSERT(this, IsNumber(value));
+ TNode<Number> value) {
TVARIABLE(UintPtrT, result);
Label smi(this), done(this, &result);
GotoIf(TaggedIsSmi(value), &smi);
@@ -4032,43 +4019,30 @@ Node* CodeStubAssembler::InstanceTypeEqual(Node* instance_type, int type) {
return Word32Equal(instance_type, Int32Constant(type));
}
-Node* CodeStubAssembler::IsSpecialReceiverMap(Node* map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
- Node* is_special = IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
- uint32_t mask =
- 1 << Map::kHasNamedInterceptor | 1 << Map::kIsAccessCheckNeeded;
- USE(mask);
- // Interceptors or access checks imply special receiver.
- CSA_ASSERT(this,
- SelectConstant(IsSetWord32(LoadMapBitField(map), mask), is_special,
- Int32Constant(1), MachineRepresentation::kWord32));
- return is_special;
-}
-
TNode<BoolT> CodeStubAssembler::IsDictionaryMap(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
Node* bit_field3 = LoadMapBitField3(map);
- return IsSetWord32<Map::DictionaryMap>(bit_field3);
+ return IsSetWord32<Map::IsDictionaryMapBit>(bit_field3);
}
Node* CodeStubAssembler::IsExtensibleMap(Node* map) {
CSA_ASSERT(this, IsMap(map));
- return IsSetWord32(LoadMapBitField2(map), 1 << Map::kIsExtensible);
+ return IsSetWord32<Map::IsExtensibleBit>(LoadMapBitField2(map));
}
Node* CodeStubAssembler::IsCallableMap(Node* map) {
CSA_ASSERT(this, IsMap(map));
- return IsSetWord32(LoadMapBitField(map), 1 << Map::kIsCallable);
+ return IsSetWord32<Map::IsCallableBit>(LoadMapBitField(map));
}
Node* CodeStubAssembler::IsDeprecatedMap(Node* map) {
CSA_ASSERT(this, IsMap(map));
- return IsSetWord32<Map::Deprecated>(LoadMapBitField3(map));
+ return IsSetWord32<Map::IsDeprecatedBit>(LoadMapBitField3(map));
}
Node* CodeStubAssembler::IsUndetectableMap(Node* map) {
CSA_ASSERT(this, IsMap(map));
- return IsSetWord32(LoadMapBitField(map), 1 << Map::kIsUndetectable);
+ return IsSetWord32<Map::IsUndetectableBit>(LoadMapBitField(map));
}
Node* CodeStubAssembler::IsNoElementsProtectorCellInvalid() {
@@ -4104,7 +4078,7 @@ Node* CodeStubAssembler::IsCell(Node* object) {
Node* CodeStubAssembler::IsConstructorMap(Node* map) {
CSA_ASSERT(this, IsMap(map));
- return IsSetWord32(LoadMapBitField(map), 1 << Map::kIsConstructor);
+ return IsSetWord32<Map::IsConstructorBit>(LoadMapBitField(map));
}
Node* CodeStubAssembler::IsConstructor(Node* object) {
@@ -4113,7 +4087,7 @@ Node* CodeStubAssembler::IsConstructor(Node* object) {
Node* CodeStubAssembler::IsFunctionWithPrototypeSlotMap(Node* map) {
CSA_ASSERT(this, IsMap(map));
- return IsSetWord32(LoadMapBitField(map), 1 << Map::kHasPrototypeSlot);
+ return IsSetWord32<Map::HasPrototypeSlotBit>(LoadMapBitField(map));
}
Node* CodeStubAssembler::IsSpecialReceiverInstanceType(Node* instance_type) {
@@ -4500,13 +4474,12 @@ Node* CodeStubAssembler::IsNumberArrayIndex(Node* number) {
Label check_upper_bound(this), check_is_integer(this), out(this),
return_false(this);
- GotoIfNumericGreaterThanOrEqual(number, NumberConstant(0),
- &check_upper_bound);
+ GotoIfNumberGreaterThanOrEqual(number, NumberConstant(0), &check_upper_bound);
Goto(&return_false);
BIND(&check_upper_bound);
- GotoIfNumericGreaterThanOrEqual(number, NumberConstant(kMaxUInt32),
- &return_false);
+ GotoIfNumberGreaterThanOrEqual(number, NumberConstant(kMaxUInt32),
+ &return_false);
Goto(&check_is_integer);
BIND(&check_is_integer);
@@ -4525,14 +4498,14 @@ Node* CodeStubAssembler::IsNumberArrayIndex(Node* number) {
return var_result.value();
}
-TNode<Uint32T> CodeStubAssembler::StringCharCodeAt(SloppyTNode<String> string,
- SloppyTNode<IntPtrT> index) {
+TNode<Int32T> CodeStubAssembler::StringCharCodeAt(SloppyTNode<String> string,
+ SloppyTNode<IntPtrT> index) {
CSA_ASSERT(this, IsString(string));
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(index, IntPtrConstant(0)));
CSA_ASSERT(this, IntPtrLessThan(index, LoadStringLengthAsWord(string)));
- VARIABLE(var_result, MachineRepresentation::kWord32);
+ TVARIABLE(Int32T, var_result);
Label return_result(this), if_runtime(this, Label::kDeferred),
if_stringistwobyte(this), if_stringisonebyte(this);
@@ -4550,14 +4523,16 @@ TNode<Uint32T> CodeStubAssembler::StringCharCodeAt(SloppyTNode<String> string,
BIND(&if_stringisonebyte);
{
- var_result.Bind(Load(MachineType::Uint8(), string_data, offset));
+ var_result =
+ UncheckedCast<Int32T>(Load(MachineType::Uint8(), string_data, offset));
Goto(&return_result);
}
BIND(&if_stringistwobyte);
{
- var_result.Bind(Load(MachineType::Uint16(), string_data,
- WordShl(offset, IntPtrConstant(1))));
+ var_result =
+ UncheckedCast<Int32T>(Load(MachineType::Uint16(), string_data,
+ WordShl(offset, IntPtrConstant(1))));
Goto(&return_result);
}
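
In plain C++ terms, the two branches above do the following: one-byte strings load a uint8_t at the character index, two-byte strings load a uint16_t at byte offset index << 1. A sketch with a raw data pointer, not V8's actual string layout:

#include <cstddef>
#include <cstdint>
#include <cstring>

int32_t CharCodeAtSketch(const uint8_t* string_data, bool is_one_byte,
                         size_t index) {
  if (is_one_byte) return string_data[index];  // Uint8 load
  uint16_t code_unit;                          // Uint16 load at index * 2
  std::memcpy(&code_unit, string_data + (index << 1), sizeof(code_unit));
  return code_unit;
}
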
@@ -4565,15 +4540,15 @@ TNode<Uint32T> CodeStubAssembler::StringCharCodeAt(SloppyTNode<String> string,
{
Node* result = CallRuntime(Runtime::kStringCharCodeAt, NoContextConstant(),
string, SmiTag(index));
- var_result.Bind(SmiToWord32(result));
+ var_result = SmiToWord32(result);
Goto(&return_result);
}
BIND(&return_result);
- return UncheckedCast<Uint32T>(var_result.value());
+ return var_result;
}
-Node* CodeStubAssembler::StringFromCharCode(Node* code) {
+TNode<String> CodeStubAssembler::StringFromCharCode(TNode<Int32T> code) {
VARIABLE(var_result, MachineRepresentation::kTagged);
// Check if the {code} is a one-byte char code.
@@ -4627,7 +4602,7 @@ Node* CodeStubAssembler::StringFromCharCode(Node* code) {
BIND(&if_done);
CSA_ASSERT(this, IsString(var_result.value()));
- return var_result.value();
+ return CAST(var_result.value());
}
// A wrapper around CopyStringCharacters which determines the correct string
@@ -4787,7 +4762,7 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
// Substrings of length 1 are generated through CharCodeAt and FromCharCode.
BIND(&single_char);
{
- Node* char_code = StringCharCodeAt(string, SmiUntag(from));
+ TNode<Int32T> char_code = StringCharCodeAt(string, SmiUntag(from));
var_result.Bind(StringFromCharCode(char_code));
Goto(&end);
}
@@ -5161,8 +5136,8 @@ Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
return result.value();
}
-Node* CodeStubAssembler::StringFromCodePoint(Node* codepoint,
- UnicodeEncoding encoding) {
+TNode<String> CodeStubAssembler::StringFromCodePoint(TNode<Int32T> codepoint,
+ UnicodeEncoding encoding) {
VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant());
Label if_isword16(this), if_isword32(this), return_result(this);
@@ -5194,7 +5169,7 @@ Node* CodeStubAssembler::StringFromCodePoint(Node* codepoint,
Int32Constant(0xDC00));
      // codepoint = (trail << 16) | lead;
- codepoint = Word32Or(Word32Shl(trail, Int32Constant(16)), lead);
+ codepoint = Signed(Word32Or(Word32Shl(trail, Int32Constant(16)), lead));
break;
}
}
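
For a code point above 0xFFFF the branch above splits it into a UTF-16 surrogate pair and, as the comment in the hunk notes, packs the trail surrogate into the upper 16 bits. A standalone sketch of that packing (standard UTF-16 arithmetic, not V8 code):

#include <cstdint>

uint32_t PackSurrogatePair(uint32_t code_point) {  // requires code_point > 0xFFFF
  uint32_t c = code_point - 0x10000;
  uint32_t lead = 0xD800 + (c >> 10);     // high (lead) surrogate
  uint32_t trail = 0xDC00 + (c & 0x3FF);  // low (trail) surrogate
  return (trail << 16) | lead;            // codepoint = (trail << 16) | lead
}
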
@@ -5209,12 +5184,10 @@ Node* CodeStubAssembler::StringFromCodePoint(Node* codepoint,
}
BIND(&return_result);
- CSA_ASSERT(this, IsString(var_result.value()));
- return var_result.value();
+ return CAST(var_result.value());
}
-TNode<Number> CodeStubAssembler::StringToNumber(SloppyTNode<Context> context,
- SloppyTNode<String> input) {
+TNode<Number> CodeStubAssembler::StringToNumber(SloppyTNode<String> input) {
CSA_SLOW_ASSERT(this, IsString(input));
Label runtime(this, Label::kDeferred);
Label end(this);
@@ -5226,12 +5199,14 @@ TNode<Number> CodeStubAssembler::StringToNumber(SloppyTNode<Context> context,
GotoIf(IsSetWord32(hash, Name::kDoesNotContainCachedArrayIndexMask),
&runtime);
- var_result = SmiTag(DecodeWordFromWord32<String::ArrayIndexValueBits>(hash));
+ var_result =
+ SmiTag(Signed(DecodeWordFromWord32<String::ArrayIndexValueBits>(hash)));
Goto(&end);
BIND(&runtime);
{
- var_result = CAST(CallRuntime(Runtime::kStringToNumber, context, input));
+ var_result =
+ CAST(CallRuntime(Runtime::kStringToNumber, NoContextConstant(), input));
Goto(&end);
}
@@ -5239,7 +5214,7 @@ TNode<Number> CodeStubAssembler::StringToNumber(SloppyTNode<Context> context,
return var_result;
}
-Node* CodeStubAssembler::NumberToString(Node* context, Node* argument) {
+Node* CodeStubAssembler::NumberToString(Node* argument) {
VARIABLE(result, MachineRepresentation::kTagged);
Label runtime(this, Label::kDeferred), smi(this), done(this, &result);
@@ -5290,7 +5265,8 @@ Node* CodeStubAssembler::NumberToString(Node* context, Node* argument) {
BIND(&runtime);
{
// No cache entry, go to the runtime.
- result.Bind(CallRuntime(Runtime::kNumberToString, context, argument));
+ result.Bind(CallRuntime(Runtime::kNumberToStringSkipCache,
+ NoContextConstant(), argument));
}
Goto(&done);
@@ -5393,7 +5369,7 @@ Node* CodeStubAssembler::NonNumberToNumberOrNumeric(
BIND(&if_inputisstring);
{
// The {input} is a String, use the fast stub to convert it to a Number.
- var_result.Bind(StringToNumber(context, input));
+ var_result.Bind(StringToNumber(input));
Goto(&end);
}
@@ -5525,18 +5501,17 @@ TNode<Number> CodeStubAssembler::ToNumber(SloppyTNode<Context> context,
void CodeStubAssembler::TaggedToNumeric(Node* context, Node* value, Label* done,
Variable* var_numeric) {
- TaggedToNumeric<Feedback::kNone>(context, value, done, var_numeric);
+ TaggedToNumeric(context, value, done, var_numeric, nullptr);
}
void CodeStubAssembler::TaggedToNumericWithFeedback(Node* context, Node* value,
Label* done,
Variable* var_numeric,
Variable* var_feedback) {
- TaggedToNumeric<Feedback::kCollect>(context, value, done, var_numeric,
- var_feedback);
+ DCHECK_NOT_NULL(var_feedback);
+ TaggedToNumeric(context, value, done, var_numeric, var_feedback);
}
-template <CodeStubAssembler::Feedback feedback>
void CodeStubAssembler::TaggedToNumeric(Node* context, Node* value, Label* done,
Variable* var_numeric,
Variable* var_feedback) {
@@ -5551,34 +5526,24 @@ void CodeStubAssembler::TaggedToNumeric(Node* context, Node* value, Label* done,
// {value} is not a Numeric yet.
GotoIf(Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE)), &if_oddball);
var_numeric->Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context, value));
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kAny));
- }
+ OverwriteFeedback(var_feedback, BinaryOperationFeedback::kAny);
Goto(done);
BIND(&if_smi);
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
- }
+ OverwriteFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
Goto(done);
BIND(&if_heapnumber);
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNumber));
- }
+ OverwriteFeedback(var_feedback, BinaryOperationFeedback::kNumber);
Goto(done);
BIND(&if_bigint);
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
- }
+ OverwriteFeedback(var_feedback, BinaryOperationFeedback::kBigInt);
Goto(done);
BIND(&if_oddball);
+ OverwriteFeedback(var_feedback, BinaryOperationFeedback::kNumberOrOddball);
var_numeric->Bind(LoadObjectField(value, Oddball::kToNumberOffset));
- if (feedback == Feedback::kCollect) {
- var_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- }
Goto(done);
}
@@ -5702,7 +5667,7 @@ TNode<String> CodeStubAssembler::ToString(SloppyTNode<Context> context,
Branch(IsHeapNumberMap(input_map), &is_number, &not_heap_number);
BIND(&is_number);
- result.Bind(NumberToString(context, input));
+ result.Bind(NumberToString(input));
Goto(&done);
BIND(&not_heap_number);
@@ -5774,8 +5739,8 @@ Node* CodeStubAssembler::ToSmiIndex(Node* const input, Node* const context,
Branch(IsUndefined(result.value()), &return_zero, &defined);
BIND(&defined);
- result.Bind(ToInteger(context, result.value(),
- CodeStubAssembler::kTruncateMinusZero));
+ result.Bind(ToInteger_Inline(CAST(context), CAST(result.value()),
+ CodeStubAssembler::kTruncateMinusZero));
GotoIfNot(TaggedIsSmi(result.value()), range_error);
CSA_ASSERT(this, TaggedIsSmi(result.value()));
Goto(&negative_check);
@@ -5799,8 +5764,8 @@ Node* CodeStubAssembler::ToSmiLength(Node* input, Node* const context,
Branch(TaggedIsSmi(result.value()), &negative_check, &to_integer);
BIND(&to_integer);
- result.Bind(ToInteger(context, result.value(),
- CodeStubAssembler::kTruncateMinusZero));
+ result.Bind(ToInteger_Inline(CAST(context), CAST(result.value()),
+ CodeStubAssembler::kTruncateMinusZero));
GotoIf(TaggedIsSmi(result.value()), &negative_check);
// result.value() can still be a negative HeapNumber here.
Branch(IsTrue(CallBuiltin(Builtins::kLessThan, context, result.value(),
@@ -5828,6 +5793,16 @@ Node* CodeStubAssembler::ToLength_Inline(Node* const context,
MachineRepresentation::kTagged);
}
+TNode<Number> CodeStubAssembler::ToInteger_Inline(
+ TNode<Context> context, TNode<Object> input, ToIntegerTruncationMode mode) {
+ Builtins::Name builtin = (mode == kNoTruncation)
+ ? Builtins::kToInteger
+ : Builtins::kToInteger_TruncateMinusZero;
+ return CAST(Select(TaggedIsSmi(input), [=] { return input; },
+ [=] { return CallBuiltin(builtin, context, input); },
+ MachineRepresentation::kTagged));
+}
+
TNode<Number> CodeStubAssembler::ToInteger(SloppyTNode<Context> context,
SloppyTNode<Object> input,
ToIntegerTruncationMode mode) {
@@ -5886,6 +5861,7 @@ TNode<Number> CodeStubAssembler::ToInteger(SloppyTNode<Context> context,
}
BIND(&out);
+ if (mode == kTruncateMinusZero) CSA_ASSERT(this, IsNumberNormalized(var_arg));
return CAST(var_arg);
}
@@ -5895,8 +5871,10 @@ TNode<Uint32T> CodeStubAssembler::DecodeWord32(SloppyTNode<Word32T> word32,
Word32And(word32, Int32Constant(mask)), static_cast<int>(shift)));
}
-Node* CodeStubAssembler::DecodeWord(Node* word, uint32_t shift, uint32_t mask) {
- return WordShr(WordAnd(word, IntPtrConstant(mask)), static_cast<int>(shift));
+TNode<UintPtrT> CodeStubAssembler::DecodeWord(SloppyTNode<WordT> word,
+ uint32_t shift, uint32_t mask) {
+ return Unsigned(
+ WordShr(WordAnd(word, IntPtrConstant(mask)), static_cast<int>(shift)));
}
Node* CodeStubAssembler::UpdateWord(Node* word, Node* value, uint32_t shift,
@@ -6187,14 +6165,14 @@ Node* CodeStubAssembler::ComputeIntegerHash(Node* key, Node* seed) {
// See v8::internal::ComputeIntegerHash()
Node* hash = TruncateWordToWord32(key);
hash = Word32Xor(hash, seed);
- hash = Int32Add(Word32Xor(hash, Int32Constant(0xffffffff)),
+ hash = Int32Add(Word32Xor(hash, Int32Constant(0xFFFFFFFF)),
Word32Shl(hash, Int32Constant(15)));
hash = Word32Xor(hash, Word32Shr(hash, Int32Constant(12)));
hash = Int32Add(hash, Word32Shl(hash, Int32Constant(2)));
hash = Word32Xor(hash, Word32Shr(hash, Int32Constant(4)));
hash = Int32Mul(hash, Int32Constant(2057));
hash = Word32Xor(hash, Word32Shr(hash, Int32Constant(16)));
- return Word32And(hash, Int32Constant(0x3fffffff));
+ return Word32And(hash, Int32Constant(0x3FFFFFFF));
}
void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary,
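
For reference, the hash computed above corresponds to this plain C++, following the same steps as the hunk (a sketch, not V8's own helper):

#include <cstdint>

uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);  // hash = (hash ^ 0xFFFFFFFF) + (hash << 15)
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash & 0x3FFFFFFF;     // keep the result in positive Smi range
}
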
@@ -6391,36 +6369,38 @@ Node* CodeStubAssembler::DescriptorArrayNumberOfEntries(Node* descriptors) {
descriptors, IntPtrConstant(DescriptorArray::kDescriptorLengthIndex));
}
-namespace {
-
-Node* DescriptorNumberToIndex(CodeStubAssembler* a, Node* descriptor_number) {
- Node* descriptor_size = a->Int32Constant(DescriptorArray::kEntrySize);
- Node* index = a->Int32Mul(descriptor_number, descriptor_size);
- return a->ChangeInt32ToIntPtr(index);
+Node* CodeStubAssembler::DescriptorNumberToIndex(
+ SloppyTNode<Uint32T> descriptor_number) {
+ Node* descriptor_size = Int32Constant(DescriptorArray::kEntrySize);
+ Node* index = Int32Mul(descriptor_number, descriptor_size);
+ return ChangeInt32ToIntPtr(index);
}
-} // namespace
-
Node* CodeStubAssembler::DescriptorArrayToKeyIndex(Node* descriptor_number) {
return IntPtrAdd(IntPtrConstant(DescriptorArray::ToKeyIndex(0)),
- DescriptorNumberToIndex(this, descriptor_number));
+ DescriptorNumberToIndex(descriptor_number));
}
Node* CodeStubAssembler::DescriptorArrayGetSortedKeyIndex(
Node* descriptors, Node* descriptor_number) {
- const int details_offset = DescriptorArray::ToDetailsIndex(0) * kPointerSize;
- Node* details = LoadAndUntagToWord32FixedArrayElement(
- descriptors, DescriptorNumberToIndex(this, descriptor_number),
- details_offset);
+ Node* details = DescriptorArrayGetDetails(
+ TNode<DescriptorArray>::UncheckedCast(descriptors),
+ TNode<Uint32T>::UncheckedCast(descriptor_number));
return DecodeWord32<PropertyDetails::DescriptorPointer>(details);
}
Node* CodeStubAssembler::DescriptorArrayGetKey(Node* descriptors,
Node* descriptor_number) {
const int key_offset = DescriptorArray::ToKeyIndex(0) * kPointerSize;
- return LoadFixedArrayElement(descriptors,
- DescriptorNumberToIndex(this, descriptor_number),
- key_offset);
+ return LoadFixedArrayElement(
+ descriptors, DescriptorNumberToIndex(descriptor_number), key_offset);
+}
+
+TNode<Uint32T> CodeStubAssembler::DescriptorArrayGetDetails(
+ TNode<DescriptorArray> descriptors, TNode<Uint32T> descriptor_number) {
+ const int details_offset = DescriptorArray::ToDetailsIndex(0) * kPointerSize;
+ return TNode<Uint32T>::UncheckedCast(LoadAndUntagToWord32FixedArrayElement(
+ descriptors, DescriptorNumberToIndex(descriptor_number), details_offset));
}
void CodeStubAssembler::DescriptorLookupBinary(Node* unique_name,
@@ -6531,13 +6511,13 @@ void CodeStubAssembler::TryLookupProperty(
&if_objectisspecial);
uint32_t mask =
- 1 << Map::kHasNamedInterceptor | 1 << Map::kIsAccessCheckNeeded;
+ Map::HasNamedInterceptorBit::kMask | Map::IsAccessCheckNeededBit::kMask;
CSA_ASSERT(this, Word32BinaryNot(IsSetWord32(LoadMapBitField(map), mask)));
USE(mask);
Node* bit_field3 = LoadMapBitField3(map);
Label if_isfastmap(this), if_isslowmap(this);
- Branch(IsSetWord32<Map::DictionaryMap>(bit_field3), &if_isslowmap,
+ Branch(IsSetWord32<Map::IsDictionaryMapBit>(bit_field3), &if_isslowmap,
&if_isfastmap);
BIND(&if_isfastmap);
{
@@ -6563,7 +6543,8 @@ void CodeStubAssembler::TryLookupProperty(
// Handle interceptors and access checks in runtime.
Node* bit_field = LoadMapBitField(map);
- int mask = 1 << Map::kHasNamedInterceptor | 1 << Map::kIsAccessCheckNeeded;
+ int mask =
+ Map::HasNamedInterceptorBit::kMask | Map::IsAccessCheckNeededBit::kMask;
GotoIf(IsSetWord32(bit_field, mask), if_bailout);
Node* dictionary = LoadSlowProperties(object);
@@ -6618,12 +6599,22 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
Variable* var_value) {
DCHECK_EQ(MachineRepresentation::kWord32, var_details->rep());
DCHECK_EQ(MachineRepresentation::kTagged, var_value->rep());
- Comment("[ LoadPropertyFromFastObject");
Node* details =
LoadDetailsByKeyIndex<DescriptorArray>(descriptors, name_index);
var_details->Bind(details);
+ LoadPropertyFromFastObject(object, map, descriptors, name_index, details,
+ var_value);
+}
+
+void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
+ Node* descriptors,
+ Node* name_index,
+ Node* details,
+ Variable* var_value) {
+ Comment("[ LoadPropertyFromFastObject");
+
Node* location = DecodeWord32<PropertyDetails::LocationField>(details);
Label if_in_field(this), if_in_descriptor(this), done(this);
@@ -6826,13 +6817,12 @@ Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
// if (!(has_prototype_slot() && !has_non_instance_prototype())) use
// generic property loading mechanism.
- int has_prototype_slot_mask = 1 << Map::kHasPrototypeSlot;
- int has_non_instance_prototype_mask = 1 << Map::kHasNonInstancePrototype;
GotoIfNot(
- Word32Equal(Word32And(LoadMapBitField(receiver_map),
- Int32Constant(has_prototype_slot_mask |
- has_non_instance_prototype_mask)),
- Int32Constant(has_prototype_slot_mask)),
+ Word32Equal(
+ Word32And(LoadMapBitField(receiver_map),
+ Int32Constant(Map::HasPrototypeSlotBit::kMask |
+ Map::HasNonInstancePrototypeBit::kMask)),
+ Int32Constant(Map::HasPrototypeSlotBit::kMask)),
if_bailout);
var_value.Bind(LoadJSFunctionPrototype(receiver, if_bailout));
Goto(&done);
@@ -7061,6 +7051,35 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
}
}
+void CodeStubAssembler::BranchIfMaybeSpecialIndex(TNode<String> name_string,
+ Label* if_maybe_special_index,
+ Label* if_not_special_index) {
+ // TODO(cwhan.tunz): Implement fast cases more.
+
+ // If a name is empty or too long, it's not a special index
+ // Max length of canonical double: -X.XXXXXXXXXXXXXXXXX-eXXX
+ const int kBufferSize = 24;
+ TNode<Smi> string_length = LoadStringLengthAsSmi(name_string);
+ GotoIf(SmiEqual(string_length, SmiConstant(0)), if_not_special_index);
+ GotoIf(SmiGreaterThan(string_length, SmiConstant(kBufferSize)),
+ if_not_special_index);
+
+ // If the first character of name is not a digit or '-', or we can't match it
+ // to Infinity or NaN, then this is not a special index.
+ TNode<Int32T> first_char = StringCharCodeAt(name_string, IntPtrConstant(0));
+ // If the name starts with '-', it can be a negative index.
+ GotoIf(Word32Equal(first_char, Int32Constant('-')), if_maybe_special_index);
+ // If the name starts with 'I', it can be "Infinity".
+ GotoIf(Word32Equal(first_char, Int32Constant('I')), if_maybe_special_index);
+ // If the name starts with 'N', it can be "NaN".
+ GotoIf(Word32Equal(first_char, Int32Constant('N')), if_maybe_special_index);
+ // Finally, if the first character is not a digit either, then we are sure
+ // that the name is not a special index.
+ GotoIf(Uint32LessThan(first_char, Int32Constant('0')), if_not_special_index);
+ GotoIf(Uint32LessThan(Int32Constant('9'), first_char), if_not_special_index);
+ Goto(if_maybe_special_index);
+}
+
void CodeStubAssembler::TryPrototypeChainLookup(
Node* receiver, Node* key, const LookupInHolder& lookup_property_in_holder,
const LookupInHolder& lookup_element_in_holder, Label* if_end,
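
The new BranchIfMaybeSpecialIndex check, rendered in plain C++ (a sketch mirroring the logic above; 24 is the canonical-double buffer size used there):

#include <cstddef>
#include <string>

bool MaybeSpecialIndex(const std::string& name) {
  constexpr size_t kBufferSize = 24;
  if (name.empty() || name.size() > kBufferSize) return false;
  char first = name[0];
  if (first == '-') return true;  // possibly a negative index
  if (first == 'I') return true;  // possibly "Infinity"
  if (first == 'N') return true;  // possibly "NaN"
  return first >= '0' && first <= '9';
}
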
@@ -7108,15 +7127,22 @@ void CodeStubAssembler::TryPrototypeChainLookup(
Node* holder_map = var_holder_map.value();
Node* holder_instance_type = var_holder_instance_type.value();
- Label next_proto(this);
+ Label next_proto(this), check_integer_indexed_exotic(this);
lookup_property_in_holder(receiver, var_holder.value(), holder_map,
holder_instance_type, var_unique.value(),
- &next_proto, if_bailout);
- BIND(&next_proto);
+ &check_integer_indexed_exotic, if_bailout);
- // Bailout if it can be an integer indexed exotic case.
- GotoIf(InstanceTypeEqual(holder_instance_type, JS_TYPED_ARRAY_TYPE),
- if_bailout);
+ BIND(&check_integer_indexed_exotic);
+ {
+ // Bailout if it can be an integer indexed exotic case.
+ GotoIfNot(InstanceTypeEqual(holder_instance_type, JS_TYPED_ARRAY_TYPE),
+ &next_proto);
+ GotoIfNot(IsString(var_unique.value()), &next_proto);
+ BranchIfMaybeSpecialIndex(CAST(var_unique.value()), if_bailout,
+ &next_proto);
+ }
+
+ BIND(&next_proto);
Node* proto = LoadMapPrototype(holder_map);
@@ -7192,8 +7218,8 @@ Node* CodeStubAssembler::HasInPrototypeChain(Node* context, Node* object,
GotoIf(InstanceTypeEqual(object_instance_type, JS_PROXY_TYPE),
&return_runtime);
Node* object_bitfield = LoadMapBitField(object_map);
- int mask =
- 1 << Map::kHasNamedInterceptor | 1 << Map::kIsAccessCheckNeeded;
+ int mask = Map::HasNamedInterceptorBit::kMask |
+ Map::IsAccessCheckNeededBit::kMask;
Branch(IsSetWord32(object_bitfield, mask), &return_runtime,
&if_objectisdirect);
}
@@ -7252,12 +7278,12 @@ Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
// Goto runtime if {callable} is not a constructor or has
// a non-instance "prototype".
Node* callable_bitfield = LoadMapBitField(callable_map);
- GotoIfNot(
- Word32Equal(Word32And(callable_bitfield,
- Int32Constant((1 << Map::kHasNonInstancePrototype) |
- (1 << Map::kIsConstructor))),
- Int32Constant(1 << Map::kIsConstructor)),
- &return_runtime);
+ GotoIfNot(Word32Equal(
+ Word32And(callable_bitfield,
+ Int32Constant(Map::HasNonInstancePrototypeBit::kMask |
+ Map::IsConstructorBit::kMask)),
+ Int32Constant(Map::IsConstructorBit::kMask)),
+ &return_runtime);
// Get the "prototype" (or initial map) of the {callable}.
Node* callable_prototype =
@@ -7326,7 +7352,7 @@ Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
? index_node
: ((element_size_shift > 0)
? WordShl(index_node, IntPtrConstant(element_size_shift))
- : WordShr(index_node, IntPtrConstant(-element_size_shift)));
+ : WordSar(index_node, IntPtrConstant(-element_size_shift)));
return IntPtrAdd(IntPtrConstant(base_size), shifted_index);
}
@@ -7377,8 +7403,22 @@ void CodeStubAssembler::ReportFeedbackUpdate(
#endif // V8_TRACE_FEEDBACK_UPDATES
}
+void CodeStubAssembler::OverwriteFeedback(Variable* existing_feedback,
+ int new_feedback) {
+ if (existing_feedback == nullptr) return;
+ existing_feedback->Bind(SmiConstant(new_feedback));
+}
+
+void CodeStubAssembler::CombineFeedback(Variable* existing_feedback,
+ int feedback) {
+ if (existing_feedback == nullptr) return;
+ existing_feedback->Bind(
+ SmiOr(existing_feedback->value(), SmiConstant(feedback)));
+}
+
void CodeStubAssembler::CombineFeedback(Variable* existing_feedback,
Node* feedback) {
+ if (existing_feedback == nullptr) return;
existing_feedback->Bind(SmiOr(existing_feedback->value(), feedback));
}
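Together, OverwriteFeedback and CombineFeedback maintain a small bit-set lattice: combining ORs new bits into the existing feedback (so it only ever widens), overwriting replaces it outright, and both are no-ops when no feedback is being collected, which is what the new nullptr early-returns encode. A host-side sketch of that contract, with plain integers standing in for the Smi-tagged feedback values:

#include <cstdint>

void Overwrite(uint32_t* existing_feedback, uint32_t new_feedback) {
  if (existing_feedback == nullptr) return;  // caller is not collecting feedback
  *existing_feedback = new_feedback;
}

void Combine(uint32_t* existing_feedback, uint32_t feedback) {
  if (existing_feedback == nullptr) return;  // caller is not collecting feedback
  *existing_feedback |= feedback;            // OR only adds bits, never narrows
}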
@@ -7524,15 +7564,16 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
return var_result.value();
}
-Node* CodeStubAssembler::LoadScriptContext(Node* context, int context_index) {
- Node* native_context = LoadNativeContext(context);
- Node* script_context_table =
- LoadContextElement(native_context, Context::SCRIPT_CONTEXT_TABLE_INDEX);
+TNode<Context> CodeStubAssembler::LoadScriptContext(
+ TNode<Context> context, TNode<IntPtrT> context_index) {
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<ScriptContextTable> script_context_table = CAST(
+ LoadContextElement(native_context, Context::SCRIPT_CONTEXT_TABLE_INDEX));
- int offset =
- ScriptContextTable::GetContextOffset(context_index) - kHeapObjectTag;
- return Load(MachineType::AnyTagged(), script_context_table,
- IntPtrConstant(offset));
+ Node* script_context = LoadFixedArrayElement(
+ script_context_table, context_index,
+ ScriptContextTable::kFirstContextSlotIndex * kPointerSize);
+ return CAST(script_context);
}
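The typed LoadScriptContext now indexes the table as an ordinary FixedArray, folding kFirstContextSlotIndex * kPointerSize in as an additional byte offset. The shape of that address computation, with all constants written out as assumptions for illustration rather than V8's real values:

#include <cstddef>

constexpr size_t kPointerSize = 8;            // assumes a 64-bit build
constexpr size_t kFixedArrayHeaderSize = 16;  // assumes map + length words
constexpr size_t kFirstContextSlotIndex = 1;  // assumes slot 0 holds the used count

// Byte offset (ignoring the heap-object tag) of the context_index-th script
// context within the table.
size_t ScriptContextSlotOffset(size_t context_index) {
  return kFixedArrayHeaderSize +
         (kFirstContextSlotIndex + context_index) * kPointerSize;
}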
namespace {
@@ -7567,7 +7608,7 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
if (IsFixedTypedArrayElementsKind(kind)) {
if (kind == UINT8_CLAMPED_ELEMENTS) {
CSA_ASSERT(this,
- Word32Equal(value, Word32And(Int32Constant(0xff), value)));
+ Word32Equal(value, Word32And(Int32Constant(0xFF), value)));
}
Node* offset = ElementOffsetFromIndex(index, kind, mode, 0);
MachineRepresentation rep = ElementsKindToMachineRepresentation(kind);
@@ -8029,8 +8070,8 @@ Node* CodeStubAssembler::BuildFastLoop(
? MachineType::PointerRepresentation()
: MachineRepresentation::kTaggedSigned;
VARIABLE(var, index_rep, start_index);
- VariableList vars_copy(vars, zone());
- vars_copy.Add(&var, zone());
+ VariableList vars_copy(vars.begin(), vars.end(), zone());
+ vars_copy.push_back(&var);
Label loop(this, vars_copy);
Label after_loop(this);
// Introduce an explicit second check of the termination condition before the
@@ -8135,109 +8176,86 @@ void CodeStubAssembler::InitializeFieldsWithRoot(
CodeStubAssembler::IndexAdvanceMode::kPre);
}
-void CodeStubAssembler::BranchIfNumericRelationalComparison(
- Operation op, Node* lhs, Node* rhs, Label* if_true, Label* if_false) {
- CSA_SLOW_ASSERT(this, IsNumber(lhs));
- CSA_SLOW_ASSERT(this, IsNumber(rhs));
+void CodeStubAssembler::BranchIfNumberRelationalComparison(
+ Operation op, Node* left, Node* right, Label* if_true, Label* if_false) {
+ CSA_SLOW_ASSERT(this, IsNumber(left));
+ CSA_SLOW_ASSERT(this, IsNumber(right));
- Label end(this);
- VARIABLE(result, MachineRepresentation::kTagged);
-
- // Shared entry for floating point comparison.
- Label do_fcmp(this);
- VARIABLE(var_fcmp_lhs, MachineRepresentation::kFloat64);
- VARIABLE(var_fcmp_rhs, MachineRepresentation::kFloat64);
+ Label do_float_comparison(this);
+ TVARIABLE(Float64T, var_left_float);
+ TVARIABLE(Float64T, var_right_float);
- // Check if the {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(this), if_lhsisnotsmi(this);
- Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+ Label if_left_smi(this), if_left_not_smi(this);
+ Branch(TaggedIsSmi(left), &if_left_smi, &if_left_not_smi);
- BIND(&if_lhsissmi);
+ BIND(&if_left_smi);
{
- // Check if {rhs} is a Smi or a HeapObject.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ Label if_right_not_smi(this);
+ GotoIfNot(TaggedIsSmi(right), &if_right_not_smi);
- BIND(&if_rhsissmi);
- {
- // Both {lhs} and {rhs} are Smi, so just perform a fast Smi comparison.
- switch (op) {
- case Operation::kLessThan:
- BranchIfSmiLessThan(lhs, rhs, if_true, if_false);
- break;
- case Operation::kLessThanOrEqual:
- BranchIfSmiLessThanOrEqual(lhs, rhs, if_true, if_false);
- break;
- case Operation::kGreaterThan:
- BranchIfSmiLessThan(rhs, lhs, if_true, if_false);
- break;
- case Operation::kGreaterThanOrEqual:
- BranchIfSmiLessThanOrEqual(rhs, lhs, if_true, if_false);
- break;
- default:
- UNREACHABLE();
- }
+ // Both {left} and {right} are Smi, so just perform a fast Smi comparison.
+ switch (op) {
+ case Operation::kLessThan:
+ BranchIfSmiLessThan(left, right, if_true, if_false);
+ break;
+ case Operation::kLessThanOrEqual:
+ BranchIfSmiLessThanOrEqual(left, right, if_true, if_false);
+ break;
+ case Operation::kGreaterThan:
+ BranchIfSmiLessThan(right, left, if_true, if_false);
+ break;
+ case Operation::kGreaterThanOrEqual:
+ BranchIfSmiLessThanOrEqual(right, left, if_true, if_false);
+ break;
+ default:
+ UNREACHABLE();
}
- BIND(&if_rhsisnotsmi);
+ BIND(&if_right_not_smi);
{
- CSA_ASSERT(this, IsHeapNumber(rhs));
- // Convert the {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- var_fcmp_lhs.Bind(SmiToFloat64(lhs));
- var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fcmp);
+ CSA_ASSERT(this, IsHeapNumber(right));
+ var_left_float = SmiToFloat64(left);
+ var_right_float = LoadHeapNumberValue(right);
+ Goto(&do_float_comparison);
}
}
- BIND(&if_lhsisnotsmi);
+ BIND(&if_left_not_smi);
{
- CSA_ASSERT(this, IsHeapNumber(lhs));
+ CSA_ASSERT(this, IsHeapNumber(left));
+ var_left_float = LoadHeapNumberValue(left);
- // Check if {rhs} is a Smi or a HeapObject.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ Label if_right_not_smi(this);
+ GotoIfNot(TaggedIsSmi(right), &if_right_not_smi);
+ var_right_float = SmiToFloat64(right);
+ Goto(&do_float_comparison);
- BIND(&if_rhsissmi);
+ BIND(&if_right_not_smi);
{
- // Convert the {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fcmp_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fcmp);
- }
-
- BIND(&if_rhsisnotsmi);
- {
- CSA_ASSERT(this, IsHeapNumber(rhs));
-
- // Convert the {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fcmp);
+ CSA_ASSERT(this, IsHeapNumber(right));
+ var_right_float = LoadHeapNumberValue(right);
+ Goto(&do_float_comparison);
}
}
- BIND(&do_fcmp);
+ BIND(&do_float_comparison);
{
- // Load the {lhs} and {rhs} floating point values.
- Node* lhs = var_fcmp_lhs.value();
- Node* rhs = var_fcmp_rhs.value();
-
- // Perform a fast floating point comparison.
switch (op) {
case Operation::kLessThan:
- Branch(Float64LessThan(lhs, rhs), if_true, if_false);
+ Branch(Float64LessThan(var_left_float, var_right_float), if_true,
+ if_false);
break;
case Operation::kLessThanOrEqual:
- Branch(Float64LessThanOrEqual(lhs, rhs), if_true, if_false);
+ Branch(Float64LessThanOrEqual(var_left_float, var_right_float), if_true,
+ if_false);
break;
case Operation::kGreaterThan:
- Branch(Float64GreaterThan(lhs, rhs), if_true, if_false);
+ Branch(Float64GreaterThan(var_left_float, var_right_float), if_true,
+ if_false);
break;
case Operation::kGreaterThanOrEqual:
- Branch(Float64GreaterThanOrEqual(lhs, rhs), if_true, if_false);
+ Branch(Float64GreaterThanOrEqual(var_left_float, var_right_float),
+ if_true, if_false);
break;
default:
UNREACHABLE();
@@ -8245,11 +8263,11 @@ void CodeStubAssembler::BranchIfNumericRelationalComparison(
}
}
-void CodeStubAssembler::GotoIfNumericGreaterThanOrEqual(Node* lhs, Node* rhs,
- Label* if_true) {
+void CodeStubAssembler::GotoIfNumberGreaterThanOrEqual(Node* left, Node* right,
+ Label* if_true) {
Label if_false(this);
- BranchIfNumericRelationalComparison(Operation::kGreaterThanOrEqual, lhs, rhs,
- if_true, &if_false);
+ BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, left,
+ right, if_true, &if_false);
BIND(&if_false);
}
@@ -8271,423 +8289,354 @@ Operation Reverse(Operation op) {
}
} // anonymous namespace
-Node* CodeStubAssembler::RelationalComparison(Operation op, Node* lhs,
- Node* rhs, Node* context,
+Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
+ Node* right, Node* context,
Variable* var_type_feedback) {
- Label return_true(this), return_false(this), end(this);
- VARIABLE(result, MachineRepresentation::kTagged);
-
- // Shared entry for floating point comparison.
- Label do_fcmp(this);
- VARIABLE(var_fcmp_lhs, MachineRepresentation::kFloat64);
- VARIABLE(var_fcmp_rhs, MachineRepresentation::kFloat64);
+ Label return_true(this), return_false(this), do_float_comparison(this),
+ end(this);
+ TVARIABLE(Oddball, var_result); // Actually only "true" or "false".
+ TVARIABLE(Float64T, var_left_float);
+ TVARIABLE(Float64T, var_right_float);
// We might need to loop several times due to ToPrimitive and/or ToNumeric
// conversions.
- VARIABLE(var_lhs, MachineRepresentation::kTagged, lhs);
- VARIABLE(var_rhs, MachineRepresentation::kTagged, rhs);
- VariableList loop_variable_list({&var_lhs, &var_rhs}, zone());
+ VARIABLE(var_left, MachineRepresentation::kTagged, left);
+ VARIABLE(var_right, MachineRepresentation::kTagged, right);
+ VariableList loop_variable_list({&var_left, &var_right}, zone());
if (var_type_feedback != nullptr) {
// Initialize the type feedback to None. The current feedback is combined
// with the previous feedback.
var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kNone));
- loop_variable_list.Add(var_type_feedback, zone());
+ loop_variable_list.push_back(var_type_feedback);
}
Label loop(this, loop_variable_list);
Goto(&loop);
BIND(&loop);
{
- // Load the current {lhs} and {rhs} values.
- lhs = var_lhs.value();
- rhs = var_rhs.value();
+ left = var_left.value();
+ right = var_right.value();
- // Check if the {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(this), if_lhsisnotsmi(this);
- Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+ Label if_left_smi(this), if_left_not_smi(this);
+ Branch(TaggedIsSmi(left), &if_left_smi, &if_left_not_smi);
- BIND(&if_lhsissmi);
+ BIND(&if_left_smi);
{
- Label if_rhsissmi(this), if_rhsisheapnumber(this),
- if_rhsisbigint(this, Label::kDeferred),
- if_rhsisnotnumeric(this, Label::kDeferred);
- GotoIf(TaggedIsSmi(rhs), &if_rhsissmi);
- Node* rhs_map = LoadMap(rhs);
- GotoIf(IsHeapNumberMap(rhs_map), &if_rhsisheapnumber);
- Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
- Branch(IsBigIntInstanceType(rhs_instance_type), &if_rhsisbigint,
- &if_rhsisnotnumeric);
+ Label if_right_smi(this), if_right_heapnumber(this),
+ if_right_bigint(this, Label::kDeferred),
+ if_right_not_numeric(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(right), &if_right_smi);
+ Node* right_map = LoadMap(right);
+ GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
+ Node* right_instance_type = LoadMapInstanceType(right_map);
+ Branch(IsBigIntInstanceType(right_instance_type), &if_right_bigint,
+ &if_right_not_numeric);
- BIND(&if_rhsissmi);
+ BIND(&if_right_smi);
{
- // Both {lhs} and {rhs} are Smi, so just perform a fast Smi comparison.
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kSignedSmall));
- }
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kSignedSmall);
switch (op) {
case Operation::kLessThan:
- BranchIfSmiLessThan(lhs, rhs, &return_true, &return_false);
+ BranchIfSmiLessThan(left, right, &return_true, &return_false);
break;
case Operation::kLessThanOrEqual:
- BranchIfSmiLessThanOrEqual(lhs, rhs, &return_true, &return_false);
+ BranchIfSmiLessThanOrEqual(left, right, &return_true,
+ &return_false);
break;
case Operation::kGreaterThan:
- BranchIfSmiLessThan(rhs, lhs, &return_true, &return_false);
+ BranchIfSmiLessThan(right, left, &return_true, &return_false);
break;
case Operation::kGreaterThanOrEqual:
- BranchIfSmiLessThanOrEqual(rhs, lhs, &return_true, &return_false);
+ BranchIfSmiLessThanOrEqual(right, left, &return_true,
+ &return_false);
break;
default:
UNREACHABLE();
}
}
- BIND(&if_rhsisheapnumber);
+ BIND(&if_right_heapnumber);
{
- // Convert the {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kNumber));
- }
- var_fcmp_lhs.Bind(SmiToFloat64(lhs));
- var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fcmp);
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
+ var_left_float = SmiToFloat64(left);
+ var_right_float = LoadHeapNumberValue(right);
+ Goto(&do_float_comparison);
}
- BIND(&if_rhsisbigint);
+ BIND(&if_right_bigint);
{
- // The {lhs} is a Smi and {rhs} is a BigInt.
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
- }
- result.Bind(CallRuntime(Runtime::kBigIntCompareToNumber,
- NoContextConstant(), SmiConstant(Reverse(op)),
- rhs, lhs));
+ OverwriteFeedback(var_type_feedback, CompareOperationFeedback::kAny);
+ var_result = CAST(CallRuntime(Runtime::kBigIntCompareToNumber,
+ NoContextConstant(),
+ SmiConstant(Reverse(op)), right, left));
Goto(&end);
}
- BIND(&if_rhsisnotnumeric);
+ BIND(&if_right_not_numeric);
{
- // The {lhs} is a Smi and {rhs} is not a Numeric.
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
- }
- // Convert the {rhs} to a Numeric; we don't need to perform the
- // dedicated ToPrimitive(rhs, hint Number) operation, as the
- // ToNumeric(rhs) will by itself already invoke ToPrimitive with
+ OverwriteFeedback(var_type_feedback, CompareOperationFeedback::kAny);
+ // Convert {right} to a Numeric; we don't need to perform the
+ // dedicated ToPrimitive(right, hint Number) operation, as the
+ // ToNumeric(right) will by itself already invoke ToPrimitive with
// a Number hint.
- var_rhs.Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context, rhs));
+ var_right.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, right));
Goto(&loop);
}
}
- BIND(&if_lhsisnotsmi);
+ BIND(&if_left_not_smi);
{
- Node* lhs_map = LoadMap(lhs);
+ Node* left_map = LoadMap(left);
- // Check if {rhs} is a Smi or a HeapObject.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ Label if_right_smi(this), if_right_not_smi(this);
+ Branch(TaggedIsSmi(right), &if_right_smi, &if_right_not_smi);
- BIND(&if_rhsissmi);
+ BIND(&if_right_smi);
{
- Label if_lhsisheapnumber(this), if_lhsisbigint(this, Label::kDeferred),
- if_lhsisnotnumeric(this, Label::kDeferred);
- GotoIf(IsHeapNumberMap(lhs_map), &if_lhsisheapnumber);
- Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
- Branch(IsBigIntInstanceType(lhs_instance_type), &if_lhsisbigint,
- &if_lhsisnotnumeric);
-
- BIND(&if_lhsisheapnumber);
+ Label if_left_heapnumber(this), if_left_bigint(this, Label::kDeferred),
+ if_left_not_numeric(this, Label::kDeferred);
+ GotoIf(IsHeapNumberMap(left_map), &if_left_heapnumber);
+ Node* left_instance_type = LoadMapInstanceType(left_map);
+ Branch(IsBigIntInstanceType(left_instance_type), &if_left_bigint,
+ &if_left_not_numeric);
+
+ BIND(&if_left_heapnumber);
{
- // Convert the {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kNumber));
- }
- var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fcmp_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fcmp);
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
+ var_left_float = LoadHeapNumberValue(left);
+ var_right_float = SmiToFloat64(right);
+ Goto(&do_float_comparison);
}
- BIND(&if_lhsisbigint);
+ BIND(&if_left_bigint);
{
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
- result.Bind(CallRuntime(Runtime::kBigIntCompareToNumber,
- NoContextConstant(), SmiConstant(op), lhs,
- rhs));
+ OverwriteFeedback(var_type_feedback, CompareOperationFeedback::kAny);
+ var_result = CAST(CallRuntime(Runtime::kBigIntCompareToNumber,
+ NoContextConstant(), SmiConstant(op),
+ left, right));
Goto(&end);
}
- BIND(&if_lhsisnotnumeric);
+ BIND(&if_left_not_numeric);
{
- // The {lhs} is not a Numeric and {rhs} is an Smi.
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
- // Convert the {lhs} to a Numeric; we don't need to perform the
- // dedicated ToPrimitive(lhs, hint Number) operation, as the
- // ToNumeric(lhs) will by itself already invoke ToPrimitive with
+ OverwriteFeedback(var_type_feedback, CompareOperationFeedback::kAny);
+ // Convert {left} to a Numeric; we don't need to perform the
+ // dedicated ToPrimitive(left, hint Number) operation, as the
+ // ToNumeric(left) will by itself already invoke ToPrimitive with
// a Number hint.
- var_lhs.Bind(
- CallBuiltin(Builtins::kNonNumberToNumeric, context, lhs));
+ var_left.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, left));
Goto(&loop);
}
}
- BIND(&if_rhsisnotsmi);
+ BIND(&if_right_not_smi);
{
- // Load the map of {rhs}.
- Node* rhs_map = LoadMap(rhs);
+ Node* right_map = LoadMap(right);
- // Further analyze {lhs}.
- Label if_lhsisheapnumber(this), if_lhsisbigint(this, Label::kDeferred),
- if_lhsisstring(this), if_lhsisother(this, Label::kDeferred);
- GotoIf(IsHeapNumberMap(lhs_map), &if_lhsisheapnumber);
- Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
- GotoIf(IsBigIntInstanceType(lhs_instance_type), &if_lhsisbigint);
- Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
- &if_lhsisother);
+ Label if_left_heapnumber(this), if_left_bigint(this, Label::kDeferred),
+ if_left_string(this), if_left_other(this, Label::kDeferred);
+ GotoIf(IsHeapNumberMap(left_map), &if_left_heapnumber);
+ Node* left_instance_type = LoadMapInstanceType(left_map);
+ GotoIf(IsBigIntInstanceType(left_instance_type), &if_left_bigint);
+ Branch(IsStringInstanceType(left_instance_type), &if_left_string,
+ &if_left_other);
- BIND(&if_lhsisheapnumber);
+ BIND(&if_left_heapnumber);
{
- // Further inspect {rhs}.
- Label if_rhsisheapnumber(this),
- if_rhsisbigint(this, Label::kDeferred),
- if_rhsisnotnumeric(this, Label::kDeferred);
- GotoIf(WordEqual(rhs_map, lhs_map), &if_rhsisheapnumber);
- Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
- Branch(IsBigIntInstanceType(rhs_instance_type), &if_rhsisbigint,
- &if_rhsisnotnumeric);
-
- BIND(&if_rhsisheapnumber);
+ Label if_right_heapnumber(this),
+ if_right_bigint(this, Label::kDeferred),
+ if_right_not_numeric(this, Label::kDeferred);
+ GotoIf(WordEqual(right_map, left_map), &if_right_heapnumber);
+ Node* right_instance_type = LoadMapInstanceType(right_map);
+ Branch(IsBigIntInstanceType(right_instance_type), &if_right_bigint,
+ &if_right_not_numeric);
+
+ BIND(&if_right_heapnumber);
{
- // Convert the {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kNumber));
- }
- var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fcmp);
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kNumber);
+ var_left_float = LoadHeapNumberValue(left);
+ var_right_float = LoadHeapNumberValue(right);
+ Goto(&do_float_comparison);
}
- BIND(&if_rhsisbigint);
+ BIND(&if_right_bigint);
{
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
- result.Bind(CallRuntime(Runtime::kBigIntCompareToNumber,
- NoContextConstant(),
- SmiConstant(Reverse(op)), rhs, lhs));
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kAny);
+ var_result = CAST(CallRuntime(
+ Runtime::kBigIntCompareToNumber, NoContextConstant(),
+ SmiConstant(Reverse(op)), right, left));
Goto(&end);
}
- BIND(&if_rhsisnotnumeric);
+ BIND(&if_right_not_numeric);
{
- // The {lhs} is a HeapNumber and {rhs} is not a Numeric.
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
- // Convert the {rhs} to a Numeric; we don't need to perform
- // dedicated ToPrimitive(rhs, hint Number) operation, as the
- // ToNumeric(rhs) will by itself already invoke ToPrimitive with
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kAny);
+ // Convert {right} to a Numeric; we don't need to perform
+ // dedicated ToPrimitive(right, hint Number) operation, as the
+ // ToNumeric(right) will by itself already invoke ToPrimitive with
// a Number hint.
- var_rhs.Bind(
- CallBuiltin(Builtins::kNonNumberToNumeric, context, rhs));
+ var_right.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, right));
Goto(&loop);
}
}
- BIND(&if_lhsisbigint);
+ BIND(&if_left_bigint);
{
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
-
- Label if_rhsisheapnumber(this), if_rhsisbigint(this),
- if_rhsisnotnumeric(this);
- GotoIf(IsHeapNumberMap(rhs_map), &if_rhsisheapnumber);
- Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
- Branch(IsBigIntInstanceType(rhs_instance_type), &if_rhsisbigint,
- &if_rhsisnotnumeric);
-
- BIND(&if_rhsisheapnumber);
+ Label if_right_heapnumber(this), if_right_bigint(this),
+ if_right_not_numeric(this);
+ GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
+ Node* right_instance_type = LoadMapInstanceType(right_map);
+ Branch(IsBigIntInstanceType(right_instance_type), &if_right_bigint,
+ &if_right_not_numeric);
+
+ BIND(&if_right_heapnumber);
{
- result.Bind(CallRuntime(Runtime::kBigIntCompareToNumber,
- NoContextConstant(), SmiConstant(op), lhs,
- rhs));
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kAny);
+ var_result = CAST(CallRuntime(Runtime::kBigIntCompareToNumber,
+ NoContextConstant(), SmiConstant(op),
+ left, right));
Goto(&end);
}
- BIND(&if_rhsisbigint);
+ BIND(&if_right_bigint);
{
- result.Bind(CallRuntime(Runtime::kBigIntCompareToBigInt,
- NoContextConstant(), SmiConstant(op), lhs,
- rhs));
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kBigInt);
+ var_result = CAST(CallRuntime(Runtime::kBigIntCompareToBigInt,
+ NoContextConstant(), SmiConstant(op),
+ left, right));
Goto(&end);
}
- BIND(&if_rhsisnotnumeric);
+ BIND(&if_right_not_numeric);
{
- // Convert the {rhs} to a Numeric; we don't need to perform
- // dedicated ToPrimitive(rhs, hint Number) operation, as the
- // ToNumeric(rhs) will by itself already invoke ToPrimitive with
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kAny);
+ // Convert {right} to a Numeric; we don't need to perform
+ // dedicated ToPrimitive(right, hint Number) operation, as the
+ // ToNumeric(right) will by itself already invoke ToPrimitive with
// a Number hint.
- var_rhs.Bind(
- CallBuiltin(Builtins::kNonNumberToNumeric, context, rhs));
+ var_right.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, right));
Goto(&loop);
}
}
- BIND(&if_lhsisstring);
+ BIND(&if_left_string);
{
- // Load the instance type of {rhs}.
- Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
-
- // Check if {rhs} is also a String.
- Label if_rhsisstring(this, Label::kDeferred),
- if_rhsisnotstring(this, Label::kDeferred);
- Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
- &if_rhsisnotstring);
-
- BIND(&if_rhsisstring);
- {
- // Both {lhs} and {rhs} are strings.
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kString));
- }
- switch (op) {
- case Operation::kLessThan:
- result.Bind(
- CallBuiltin(Builtins::kStringLessThan, context, lhs, rhs));
- Goto(&end);
- break;
- case Operation::kLessThanOrEqual:
- result.Bind(CallBuiltin(Builtins::kStringLessThanOrEqual,
- context, lhs, rhs));
- Goto(&end);
- break;
- case Operation::kGreaterThan:
- result.Bind(CallBuiltin(Builtins::kStringGreaterThan, context,
- lhs, rhs));
- Goto(&end);
- break;
- case Operation::kGreaterThanOrEqual:
- result.Bind(CallBuiltin(Builtins::kStringGreaterThanOrEqual,
- context, lhs, rhs));
- Goto(&end);
- break;
- default:
- UNREACHABLE();
- }
+ Node* right_instance_type = LoadMapInstanceType(right_map);
+
+ Label if_right_not_string(this, Label::kDeferred);
+ GotoIfNot(IsStringInstanceType(right_instance_type),
+ &if_right_not_string);
+
+ // Both {left} and {right} are strings.
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kString);
+ Builtins::Name builtin;
+ switch (op) {
+ case Operation::kLessThan:
+ builtin = Builtins::kStringLessThan;
+ break;
+ case Operation::kLessThanOrEqual:
+ builtin = Builtins::kStringLessThanOrEqual;
+ break;
+ case Operation::kGreaterThan:
+ builtin = Builtins::kStringGreaterThan;
+ break;
+ case Operation::kGreaterThanOrEqual:
+ builtin = Builtins::kStringGreaterThanOrEqual;
+ break;
+ default:
+ UNREACHABLE();
}
+ var_result = CAST(CallBuiltin(builtin, context, left, right));
+ Goto(&end);
- BIND(&if_rhsisnotstring);
+ BIND(&if_right_not_string);
{
- // The {lhs} is a String and {rhs} is not a String.
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
- // The {lhs} is a String, while {rhs} isn't. So we call
- // ToPrimitive(rhs, hint Number) if {rhs} is a receiver, or
- // ToNumeric(lhs) and then ToNumeric(rhs) in the other cases.
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kAny);
+ // {left} is a String, while {right} isn't. So we call
+ // ToPrimitive(right, hint Number) if {right} is a receiver, or
+ // ToNumeric(left) and then ToNumeric(right) in the other cases.
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- Label if_rhsisreceiver(this, Label::kDeferred),
- if_rhsisnotreceiver(this, Label::kDeferred);
- Branch(IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
+ Label if_right_receiver(this, Label::kDeferred);
+ GotoIf(IsJSReceiverInstanceType(right_instance_type),
+ &if_right_receiver);
+
+ var_left.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, left));
+ var_right.Bind(CallBuiltin(Builtins::kToNumeric, context, right));
+ Goto(&loop);
- BIND(&if_rhsisreceiver);
+ BIND(&if_right_receiver);
{
- // Convert {rhs} to a primitive first passing Number hint.
Callable callable = CodeFactory::NonPrimitiveToPrimitive(
isolate(), ToPrimitiveHint::kNumber);
- var_rhs.Bind(CallStub(callable, context, rhs));
- Goto(&loop);
- }
-
- BIND(&if_rhsisnotreceiver);
- {
- // Convert both {lhs} and {rhs} to Numeric.
- var_lhs.Bind(
- CallBuiltin(Builtins::kNonNumberToNumeric, context, lhs));
- var_rhs.Bind(CallBuiltin(Builtins::kToNumeric, context, rhs));
+ var_right.Bind(CallStub(callable, context, right));
Goto(&loop);
}
}
}
- BIND(&if_lhsisother);
+ BIND(&if_left_other);
{
- // The {lhs} is neither a Numeric nor a String, and {rhs} is not
- // an Smi.
+ // {left} is neither a Numeric nor a String, and {right} is not a Smi.
if (var_type_feedback != nullptr) {
- // Collect NumberOrOddball feedback if {lhs} is an Oddball
- // and {rhs} is either a HeapNumber or Oddball. Otherwise collect
+ // Collect NumberOrOddball feedback if {left} is an Oddball
+ // and {right} is either a HeapNumber or Oddball. Otherwise collect
// Any feedback.
Label collect_any_feedback(this), collect_oddball_feedback(this),
collect_feedback_done(this);
- GotoIfNot(InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE),
+ GotoIfNot(InstanceTypeEqual(left_instance_type, ODDBALL_TYPE),
&collect_any_feedback);
- Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
- GotoIf(InstanceTypeEqual(rhs_instance_type, HEAP_NUMBER_TYPE),
- &collect_oddball_feedback);
- Branch(InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE),
+ GotoIf(IsHeapNumberMap(right_map), &collect_oddball_feedback);
+ Node* right_instance_type = LoadMapInstanceType(right_map);
+ Branch(InstanceTypeEqual(right_instance_type, ODDBALL_TYPE),
&collect_oddball_feedback, &collect_any_feedback);
BIND(&collect_oddball_feedback);
{
- CombineFeedback(
- var_type_feedback,
- SmiConstant(CompareOperationFeedback::kNumberOrOddball));
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kNumberOrOddball);
Goto(&collect_feedback_done);
}
BIND(&collect_any_feedback);
{
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kAny);
Goto(&collect_feedback_done);
}
BIND(&collect_feedback_done);
}
- // If {lhs} is a receiver, we must call ToPrimitive(lhs, hint Number).
- // Otherwise we must call ToNumeric(lhs) and then ToNumeric(rhs).
+ // If {left} is a receiver, call ToPrimitive(left, hint Number).
+ // Otherwise call ToNumeric(left) and then ToNumeric(right).
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- Label if_lhsisreceiver(this, Label::kDeferred),
- if_lhsisnotreceiver(this, Label::kDeferred);
- Branch(IsJSReceiverInstanceType(lhs_instance_type), &if_lhsisreceiver,
- &if_lhsisnotreceiver);
+ Label if_left_receiver(this, Label::kDeferred);
+ GotoIf(IsJSReceiverInstanceType(left_instance_type),
+ &if_left_receiver);
+
+ var_left.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumeric, context, left));
+ var_right.Bind(CallBuiltin(Builtins::kToNumeric, context, right));
+ Goto(&loop);
- BIND(&if_lhsisreceiver);
+ BIND(&if_left_receiver);
{
Callable callable = CodeFactory::NonPrimitiveToPrimitive(
isolate(), ToPrimitiveHint::kNumber);
- var_lhs.Bind(CallStub(callable, context, lhs));
- Goto(&loop);
- }
-
- BIND(&if_lhsisnotreceiver);
- {
- var_lhs.Bind(
- CallBuiltin(Builtins::kNonNumberToNumeric, context, lhs));
- var_rhs.Bind(CallBuiltin(Builtins::kToNumeric, context, rhs));
+ var_left.Bind(CallStub(callable, context, left));
Goto(&loop);
}
}
@@ -8695,26 +8644,24 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* lhs,
}
}
- BIND(&do_fcmp);
+ BIND(&do_float_comparison);
{
- // Load the {lhs} and {rhs} floating point values.
- Node* lhs = var_fcmp_lhs.value();
- Node* rhs = var_fcmp_rhs.value();
-
- // Perform a fast floating point comparison.
switch (op) {
case Operation::kLessThan:
- Branch(Float64LessThan(lhs, rhs), &return_true, &return_false);
+ Branch(Float64LessThan(var_left_float, var_right_float), &return_true,
+ &return_false);
break;
case Operation::kLessThanOrEqual:
- Branch(Float64LessThanOrEqual(lhs, rhs), &return_true, &return_false);
+ Branch(Float64LessThanOrEqual(var_left_float, var_right_float),
+ &return_true, &return_false);
break;
case Operation::kGreaterThan:
- Branch(Float64GreaterThan(lhs, rhs), &return_true, &return_false);
+ Branch(Float64GreaterThan(var_left_float, var_right_float),
+ &return_true, &return_false);
break;
case Operation::kGreaterThanOrEqual:
- Branch(Float64GreaterThanOrEqual(lhs, rhs), &return_true,
- &return_false);
+ Branch(Float64GreaterThanOrEqual(var_left_float, var_right_float),
+ &return_true, &return_false);
break;
default:
UNREACHABLE();
@@ -8723,18 +8670,18 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* lhs,
BIND(&return_true);
{
- result.Bind(TrueConstant());
+ var_result = TrueConstant();
Goto(&end);
}
BIND(&return_false);
{
- result.Bind(FalseConstant());
+ var_result = FalseConstant();
Goto(&end);
}
BIND(&end);
- return result.value();
+ return var_result;
}
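In the Smi fast paths above, greater-than is expressed as less-than with the operands swapped (and the BigInt runtime calls pass Reverse(op) with the arguments exchanged for the same reason), so one primitive comparison direction suffices. In plain C++ the swap trick looks like this (a sketch of the idea, not the generated code):

enum class Op { kLessThan, kLessThanOrEqual, kGreaterThan, kGreaterThanOrEqual };

bool Compare(Op op, double left, double right) {
  switch (op) {
    case Op::kLessThan:           return left < right;
    case Op::kLessThanOrEqual:    return left <= right;
    case Op::kGreaterThan:        return right < left;   // swapped operands
    case Op::kGreaterThanOrEqual: return right <= left;  // swapped operands
  }
  return false;  // unreachable
}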
Node* CodeStubAssembler::CollectFeedbackForString(Node* instance_type) {
@@ -8764,10 +8711,11 @@ void CodeStubAssembler::GenerateEqual_Same(Node* value, Label* if_equal,
if (var_type_feedback != nullptr) {
Node* instance_type = LoadMapInstanceType(value_map);
- Label if_string(this), if_receiver(this), if_symbol(this),
+ Label if_string(this), if_receiver(this), if_symbol(this), if_bigint(this),
if_other(this, Label::kDeferred);
GotoIf(IsStringInstanceType(instance_type), &if_string);
GotoIf(IsJSReceiverInstanceType(instance_type), &if_receiver);
+ GotoIf(IsBigIntInstanceType(instance_type), &if_bigint);
Branch(IsSymbolInstanceType(instance_type), &if_symbol, &if_other);
BIND(&if_string);
@@ -8779,25 +8727,25 @@ void CodeStubAssembler::GenerateEqual_Same(Node* value, Label* if_equal,
BIND(&if_symbol);
{
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kSymbol));
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kSymbol);
Goto(if_equal);
}
BIND(&if_receiver);
{
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kReceiver));
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kReceiver);
Goto(if_equal);
}
- // TODO(neis): Introduce BigInt CompareOperationFeedback and collect here
- // and elsewhere?
+ BIND(&if_bigint);
+ {
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
+ Goto(if_equal);
+ }
BIND(&if_other);
{
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kAny));
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny);
Goto(if_equal);
}
} else {
@@ -8806,20 +8754,14 @@ void CodeStubAssembler::GenerateEqual_Same(Node* value, Label* if_equal,
BIND(&if_heapnumber);
{
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kNumber));
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
Node* number_value = LoadHeapNumberValue(value);
BranchIfFloat64IsNaN(number_value, if_notequal, if_equal);
}
BIND(&if_smi);
{
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kSignedSmall));
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kSignedSmall);
Goto(if_equal);
}
}
@@ -8847,10 +8789,10 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
VARIABLE(var_right, MachineRepresentation::kTagged, right);
VariableList loop_variable_list({&var_left, &var_right}, zone());
if (var_type_feedback != nullptr) {
- // Initialize the type feedback to None. The current feedback is combined
- // with the previous feedback.
- var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kNone));
- loop_variable_list.Add(var_type_feedback, zone());
+ // Initialize the type feedback to None. The current feedback will be
+ // combined with the previous feedback.
+ OverwriteFeedback(var_type_feedback, CompareOperationFeedback::kNone);
+ loop_variable_list.push_back(var_type_feedback);
}
Label loop(this, loop_variable_list);
Goto(&loop);
@@ -8880,10 +8822,8 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
{
// We have already checked for {left} and {right} being the same value,
// so when we get here they must be different Smis.
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kSignedSmall));
- }
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kSignedSmall);
Goto(&if_notequal);
}
@@ -8908,10 +8848,7 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
{
var_left_float = SmiToFloat64(left);
var_right_float = LoadHeapNumberValue(right);
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kNumber));
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
Goto(&do_float_comparison);
}
@@ -8961,11 +8898,9 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
{
GotoIfNot(IsStringInstanceType(right_type), &use_symmetry);
result.Bind(CallBuiltin(Builtins::kStringEqual, context, left, right));
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiOr(CollectFeedbackForString(left_type),
- CollectFeedbackForString(right_type)));
- }
+ CombineFeedback(var_type_feedback,
+ SmiOr(CollectFeedbackForString(left_type),
+ CollectFeedbackForString(right_type)));
Goto(&end);
}
@@ -8976,10 +8911,7 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
var_left_float = LoadHeapNumberValue(left);
var_right_float = LoadHeapNumberValue(right);
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kNumber));
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
Goto(&do_float_comparison);
BIND(&if_right_not_number);
@@ -9005,10 +8937,6 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
BIND(&if_left_bigint);
{
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
- }
-
Label if_right_heapnumber(this), if_right_bigint(this),
if_right_string(this), if_right_boolean(this);
GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
@@ -9020,6 +8948,10 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
BIND(&if_right_heapnumber);
{
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
result.Bind(CallRuntime(Runtime::kBigIntEqualToNumber,
NoContextConstant(), left, right));
Goto(&end);
@@ -9027,6 +8959,7 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
BIND(&if_right_bigint);
{
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
result.Bind(CallRuntime(Runtime::kBigIntEqualToBigInt,
NoContextConstant(), left, right));
Goto(&end);
@@ -9034,6 +8967,10 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
BIND(&if_right_string);
{
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
result.Bind(CallRuntime(Runtime::kBigIntEqualToString,
NoContextConstant(), left, right));
Goto(&end);
@@ -9041,6 +8978,10 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
BIND(&if_right_boolean);
{
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
var_right.Bind(LoadObjectField(right, Oddball::kToNumberOffset));
Goto(&loop);
}
@@ -9083,7 +9024,7 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
BIND(&if_right_symbol);
{
CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kSymbol));
+ CompareOperationFeedback::kSymbol);
Goto(&if_notequal);
}
} else {
@@ -9109,10 +9050,7 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
GotoIfNot(IsJSReceiverInstanceType(right_type), &if_right_not_receiver);
// {left} and {right} are different JSReceiver references.
- if (var_type_feedback != nullptr) {
- CombineFeedback(var_type_feedback,
- SmiConstant(CompareOperationFeedback::kReceiver));
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kReceiver);
Goto(&if_notequal);
BIND(&if_right_not_receiver);
@@ -9380,10 +9318,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_rhsisbigint);
{
if (var_type_feedback != nullptr) {
- CSA_ASSERT(
- this,
- WordEqual(var_type_feedback->value(),
- SmiConstant(CompareOperationFeedback::kAny)));
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kBigInt));
}
result.Bind(CallRuntime(Runtime::kBigIntEqualToBigInt,
NoContextConstant(), lhs, rhs));
@@ -9607,8 +9543,10 @@ void CodeStubAssembler::BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true,
}
}
-Node* CodeStubAssembler::HasProperty(Node* object, Node* key, Node* context,
- HasPropertyLookupMode mode) {
+TNode<Oddball> CodeStubAssembler::HasProperty(SloppyTNode<HeapObject> object,
+ SloppyTNode<Name> key,
+ SloppyTNode<Context> context,
+ HasPropertyLookupMode mode) {
Label call_runtime(this, Label::kDeferred), return_true(this),
return_false(this), end(this), if_proxy(this, Label::kDeferred);
@@ -9633,16 +9571,16 @@ Node* CodeStubAssembler::HasProperty(Node* object, Node* key, Node* context,
lookup_element_in_holder, &return_false,
&call_runtime, &if_proxy);
- VARIABLE(result, MachineRepresentation::kTagged);
+ TVARIABLE(Oddball, result);
BIND(&if_proxy);
{
- Node* name = ToName(context, key);
+ TNode<Name> name = CAST(ToName(context, key));
switch (mode) {
case kHasProperty:
GotoIf(IsPrivateSymbol(name), &return_false);
- result.Bind(
+ result = CAST(
CallBuiltin(Builtins::kProxyHasProperty, context, object, name));
Goto(&end);
break;
@@ -9654,13 +9592,13 @@ Node* CodeStubAssembler::HasProperty(Node* object, Node* key, Node* context,
BIND(&return_true);
{
- result.Bind(TrueConstant());
+ result = TrueConstant();
Goto(&end);
}
BIND(&return_false);
{
- result.Bind(FalseConstant());
+ result = FalseConstant();
Goto(&end);
}
@@ -9676,13 +9614,14 @@ Node* CodeStubAssembler::HasProperty(Node* object, Node* key, Node* context,
break;
}
- result.Bind(
- CallRuntime(fallback_runtime_function_id, context, object, key));
+ result =
+ CAST(CallRuntime(fallback_runtime_function_id, context, object, key));
Goto(&end);
}
BIND(&end);
- return result.value();
+ CSA_ASSERT(this, IsBoolean(result));
+ return result;
}
Node* CodeStubAssembler::ClassOf(Node* value) {
@@ -9769,10 +9708,10 @@ Node* CodeStubAssembler::Typeof(Node* value) {
Node* callable_or_undetectable_mask = Word32And(
LoadMapBitField(map),
- Int32Constant(1 << Map::kIsCallable | 1 << Map::kIsUndetectable));
+ Int32Constant(Map::IsCallableBit::kMask | Map::IsUndetectableBit::kMask));
GotoIf(Word32Equal(callable_or_undetectable_mask,
- Int32Constant(1 << Map::kIsCallable)),
+ Int32Constant(Map::IsCallableBit::kMask)),
&return_function);
GotoIfNot(Word32Equal(callable_or_undetectable_mask, Int32Constant(0)),
@@ -10139,17 +10078,17 @@ Node* CodeStubAssembler::BitwiseOp(Node* left32, Node* right32,
return ChangeInt32ToTagged(Signed(Word32Xor(left32, right32)));
case Operation::kShiftLeft:
if (!Word32ShiftIsSafe()) {
- right32 = Word32And(right32, Int32Constant(0x1f));
+ right32 = Word32And(right32, Int32Constant(0x1F));
}
return ChangeInt32ToTagged(Signed(Word32Shl(left32, right32)));
case Operation::kShiftRight:
if (!Word32ShiftIsSafe()) {
- right32 = Word32And(right32, Int32Constant(0x1f));
+ right32 = Word32And(right32, Int32Constant(0x1F));
}
return ChangeInt32ToTagged(Signed(Word32Sar(left32, right32)));
case Operation::kShiftRightLogical:
if (!Word32ShiftIsSafe()) {
- right32 = Word32And(right32, Int32Constant(0x1f));
+ right32 = Word32And(right32, Int32Constant(0x1F));
}
return ChangeUint32ToTagged(Unsigned(Word32Shr(left32, right32)));
default:
@@ -10426,7 +10365,7 @@ Node* CodeStubAssembler::IsDetachedBuffer(Node* buffer) {
}
CodeStubArguments::CodeStubArguments(
- CodeStubAssembler* assembler, SloppyTNode<IntPtrT> argc, Node* fp,
+ CodeStubAssembler* assembler, Node* argc, Node* fp,
CodeStubAssembler::ParameterMode param_mode, ReceiverMode receiver_mode)
: assembler_(assembler),
argc_mode_(param_mode),
@@ -10463,7 +10402,7 @@ TNode<Object> CodeStubArguments::AtIndex(
Node* index, CodeStubAssembler::ParameterMode mode) const {
DCHECK_EQ(argc_mode_, mode);
CSA_ASSERT(assembler_,
- assembler_->UintPtrOrSmiLessThan(index, GetLength(), mode));
+ assembler_->UintPtrOrSmiLessThan(index, GetLength(mode), mode));
return assembler_->UncheckedCast<Object>(
assembler_->Load(MachineType::AnyTagged(), AtIndexPtr(index, mode)));
}
@@ -10529,7 +10468,9 @@ void CodeStubArguments::PopAndReturn(Node* value) {
} else {
pop_count = argc_;
}
- assembler_->PopAndReturn(pop_count, value);
+
+ assembler_->PopAndReturn(assembler_->ParameterToWord(pop_count, argc_mode_),
+ value);
}
Node* CodeStubAssembler::IsFastElementsKind(Node* elements_kind) {
@@ -10586,6 +10527,7 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
CSA_ASSERT(this, Word32BinaryNot(IsConstructorMap(map)));
CSA_ASSERT(this, Word32BinaryNot(IsFunctionWithPrototypeSlotMap(map)));
Node* const fun = Allocate(JSFunction::kSizeWithoutPrototype);
+ STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
StoreMapNoWriteBarrier(fun, map);
StoreObjectFieldRoot(fun, JSObject::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
@@ -10752,5 +10694,23 @@ void CodeStubAssembler::PerformStackCheck(Node* context) {
BIND(&ok);
}
+void CodeStubAssembler::InitializeFunctionContext(Node* native_context,
+ Node* context, int slots) {
+ DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS);
+ StoreMapNoWriteBarrier(context, Heap::kFunctionContextMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(context, FixedArray::kLengthOffset,
+ SmiConstant(slots));
+
+ Node* const empty_fn =
+ LoadContextElement(native_context, Context::CLOSURE_INDEX);
+ StoreContextElementNoWriteBarrier(context, Context::CLOSURE_INDEX, empty_fn);
+ StoreContextElementNoWriteBarrier(context, Context::PREVIOUS_INDEX,
+ UndefinedConstant());
+ StoreContextElementNoWriteBarrier(context, Context::EXTENSION_INDEX,
+ TheHoleConstant());
+ StoreContextElementNoWriteBarrier(context, Context::NATIVE_CONTEXT_INDEX,
+ native_context);
+}
+
} // namespace internal
} // namespace v8
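The new InitializeFunctionContext fills in the fixed header slots of a freshly allocated function context: its map and length, then the closure, previous-context, extension and native-context slots. A host-side sketch of that initialization, with slot indices written as illustrative assumptions rather than the real Context::*_INDEX constants:

#include <vector>

enum ContextSlot {  // illustrative indices only
  kClosure = 0, kPrevious, kExtension, kNativeContext, kMinContextSlots
};

std::vector<void*> MakeFunctionContext(void* empty_fn, void* native_context,
                                       int slots) {
  // Mirrors the DCHECK above: a context always has at least the header slots.
  std::vector<void*> context(
      slots >= kMinContextSlots ? slots : kMinContextSlots, nullptr);
  context[kClosure] = empty_fn;
  context[kPrevious] = nullptr;   // undefined in the real code
  context[kExtension] = nullptr;  // the-hole in the real code
  context[kNativeContext] = native_context;
  return context;
}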
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
index 44becb3981..4a72b203a7 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/code-stub-assembler.h
@@ -22,48 +22,65 @@ class StubCache;
enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
-#define HEAP_CONSTANT_LIST(V) \
- V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
- V(AccessorPairMap, accessor_pair_map, AccessorPairMap) \
- V(AllocationSiteMap, allocation_site_map, AllocationSiteMap) \
- V(BooleanMap, boolean_map, BooleanMap) \
- V(CodeMap, code_map, CodeMap) \
- V(EmptyPropertyDictionary, empty_property_dictionary, \
- EmptyPropertyDictionary) \
- V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray) \
- V(EmptySlowElementDictionary, empty_slow_element_dictionary, \
- EmptySlowElementDictionary) \
- V(empty_string, empty_string, EmptyString) \
- V(EmptyWeakCell, empty_weak_cell, EmptyWeakCell) \
- V(FalseValue, false_value, False) \
- V(FeedbackVectorMap, feedback_vector_map, FeedbackVectorMap) \
- V(FixedArrayMap, fixed_array_map, FixedArrayMap) \
- V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \
- V(FixedDoubleArrayMap, fixed_double_array_map, FixedDoubleArrayMap) \
- V(FunctionTemplateInfoMap, function_template_info_map, \
- FunctionTemplateInfoMap) \
- V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \
- V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \
- V(HeapNumberMap, heap_number_map, HeapNumberMap) \
- V(length_string, length_string, LengthString) \
- V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \
- V(MetaMap, meta_map, MetaMap) \
- V(MinusZeroValue, minus_zero_value, MinusZero) \
- V(MutableHeapNumberMap, mutable_heap_number_map, MutableHeapNumberMap) \
- V(NanValue, nan_value, Nan) \
- V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \
- V(NullValue, null_value, Null) \
- V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \
- V(prototype_string, prototype_string, PrototypeString) \
- V(SpeciesProtector, species_protector, SpeciesProtector) \
- V(SymbolMap, symbol_map, SymbolMap) \
- V(TheHoleValue, the_hole_value, TheHole) \
- V(TrueValue, true_value, True) \
- V(Tuple2Map, tuple2_map, Tuple2Map) \
- V(Tuple3Map, tuple3_map, Tuple3Map) \
- V(UndefinedValue, undefined_value, Undefined) \
- V(WeakCellMap, weak_cell_map, WeakCellMap) \
- V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap)
+#define HEAP_CONSTANT_LIST(V) \
+ V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
+ V(AccessorPairMap, accessor_pair_map, AccessorPairMap) \
+ V(AllocationSiteMap, allocation_site_map, AllocationSiteMap) \
+ V(BooleanMap, boolean_map, BooleanMap) \
+ V(CodeMap, code_map, CodeMap) \
+ V(EmptyPropertyDictionary, empty_property_dictionary, \
+ EmptyPropertyDictionary) \
+ V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray) \
+ V(EmptySlowElementDictionary, empty_slow_element_dictionary, \
+ EmptySlowElementDictionary) \
+ V(empty_string, empty_string, EmptyString) \
+ V(EmptyWeakCell, empty_weak_cell, EmptyWeakCell) \
+ V(FalseValue, false_value, False) \
+ V(FeedbackVectorMap, feedback_vector_map, FeedbackVectorMap) \
+ V(FixedArrayMap, fixed_array_map, FixedArrayMap) \
+ V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \
+ V(FixedDoubleArrayMap, fixed_double_array_map, FixedDoubleArrayMap) \
+ V(FunctionTemplateInfoMap, function_template_info_map, \
+ FunctionTemplateInfoMap) \
+ V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \
+ V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \
+ V(HeapNumberMap, heap_number_map, HeapNumberMap) \
+ V(length_string, length_string, LengthString) \
+ V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \
+ V(MetaMap, meta_map, MetaMap) \
+ V(MinusZeroValue, minus_zero_value, MinusZero) \
+ V(MutableHeapNumberMap, mutable_heap_number_map, MutableHeapNumberMap) \
+ V(NanValue, nan_value, Nan) \
+ V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \
+ V(NullValue, null_value, Null) \
+ V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \
+ V(prototype_string, prototype_string, PrototypeString) \
+ V(SpeciesProtector, species_protector, SpeciesProtector) \
+ V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \
+ V(SymbolMap, symbol_map, SymbolMap) \
+ V(TheHoleValue, the_hole_value, TheHole) \
+ V(TrueValue, true_value, True) \
+ V(Tuple2Map, tuple2_map, Tuple2Map) \
+ V(Tuple3Map, tuple3_map, Tuple3Map) \
+ V(UndefinedValue, undefined_value, Undefined) \
+ V(WeakCellMap, weak_cell_map, WeakCellMap) \
+ V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap) \
+ V(promise_default_reject_handler_symbol, \
+ promise_default_reject_handler_symbol, PromiseDefaultRejectHandlerSymbol) \
+ V(promise_default_resolve_handler_symbol, \
+ promise_default_resolve_handler_symbol, \
+ PromiseDefaultResolveHandlerSymbol)
+
+// Returned from IteratorBuiltinsAssembler::GetIterator(). Struct is declared
+// here to simplify use in other generated builtins.
+struct IteratorRecord {
+ public:
+ // iteratorRecord.[[Iterator]]
+ compiler::TNode<JSReceiver> object;
+
+ // iteratorRecord.[[NextMethod]]
+ compiler::TNode<Object> next;
+};
// Provides JavaScript-specific "macro-assembler" functionality on top of the
// CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler,
@@ -78,6 +95,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
template <class T>
using SloppyTNode = compiler::SloppyTNode<T>;
+ template <typename T>
+ using LazyNode = std::function<TNode<T>()>;
+
CodeStubAssembler(compiler::CodeAssemblerState* state);
enum AllocationFlag : uint8_t {
@@ -274,7 +294,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Computes a % b for Smi inputs a and b; result is not necessarily a Smi.
Node* SmiMod(Node* a, Node* b);
// Computes a * b for Smi inputs a and b; result is not necessarily a Smi.
- Node* SmiMul(Node* a, Node* b);
+ TNode<Number> SmiMul(SloppyTNode<Smi> a, SloppyTNode<Smi> b);
// Tries to compute dividend / divisor for Smi inputs, branching to bailout
// if the division needs to be performed as a floating point operation.
Node* TrySmiDiv(Node* dividend, Node* divisor, Label* bailout);
@@ -417,8 +437,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// It's used for testing to ensure that slow path implementations behave
// equivalently to corresponding fast paths (where applicable).
//
- // Works only in DEBUG mode or with ENABLE_FASTSLOW_SWITCH compile time flag.
- // Nop otherwise.
+ // Works only with V8_ENABLE_FORCE_SLOW_PATH compile time flag. Nop otherwise.
void GotoIfForceSlowPath(Label* if_true);
// Load value from current frame by given offset in bytes.
@@ -553,8 +572,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Load value field of a JSValue object.
Node* LoadJSValueValue(Node* object);
// Load value field of a WeakCell object.
- Node* LoadWeakCellValueUnchecked(Node* weak_cell);
- Node* LoadWeakCellValue(Node* weak_cell, Label* if_cleared = nullptr);
+ TNode<Object> LoadWeakCellValueUnchecked(Node* weak_cell);
+ TNode<Object> LoadWeakCellValue(SloppyTNode<WeakCell> weak_cell,
+ Label* if_cleared = nullptr);
// Load an array element from a FixedArray.
Node* LoadFixedArrayElement(Node* object, Node* index,
@@ -577,7 +597,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Label* if_hole = nullptr);
// Load a feedback slot from a FeedbackVector.
- Node* LoadFeedbackVectorSlot(
+ TNode<Object> LoadFeedbackVectorSlot(
Node* object, Node* index, int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
@@ -988,7 +1008,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Number> ChangeInt32ToTagged(SloppyTNode<Int32T> value);
TNode<Number> ChangeUint32ToTagged(SloppyTNode<Uint32T> value);
TNode<Float64T> ChangeNumberToFloat64(SloppyTNode<Number> value);
- TNode<UintPtrT> ChangeNonnegativeNumberToUintPtr(SloppyTNode<Number> value);
+ TNode<UintPtrT> ChangeNonnegativeNumberToUintPtr(TNode<Number> value);
void TaggedToNumeric(Node* context, Node* value, Label* done,
Variable* var_numeric);
@@ -1105,7 +1125,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsSequentialStringInstanceType(Node* instance_type);
Node* IsShortExternalStringInstanceType(Node* instance_type);
Node* IsSpecialReceiverInstanceType(Node* instance_type);
- Node* IsSpecialReceiverMap(Node* map);
Node* IsSpeciesProtectorCellInvalid();
Node* IsStringInstanceType(Node* instance_type);
Node* IsString(Node* object);
@@ -1145,10 +1164,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// String helpers.
// Load a character from a String (might flatten a ConsString).
- TNode<Uint32T> StringCharCodeAt(SloppyTNode<String> string,
- SloppyTNode<IntPtrT> index);
+ TNode<Int32T> StringCharCodeAt(SloppyTNode<String> string,
+ SloppyTNode<IntPtrT> index);
// Return the single character string with only {code}.
- Node* StringFromCharCode(Node* code);
+ TNode<String> StringFromCharCode(TNode<Int32T> code);
enum class SubStringFlags { NONE, FROM_TO_ARE_BOUNDED };
@@ -1179,14 +1198,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Variable* var_right, Node* right_instance_type,
Label* did_something);
- Node* StringFromCodePoint(Node* codepoint, UnicodeEncoding encoding);
+ TNode<String> StringFromCodePoint(TNode<Int32T> codepoint,
+ UnicodeEncoding encoding);
// Type conversion helpers.
enum class BigIntHandling { kConvertToNumber, kThrow };
// Convert a String to a Number.
- TNode<Number> StringToNumber(SloppyTNode<Context> context,
- SloppyTNode<String> input);
- Node* NumberToString(Node* context, Node* input);
+ TNode<Number> StringToNumber(SloppyTNode<String> input);
+ // Convert a Number to a String.
+ Node* NumberToString(Node* input);
// Convert an object to a name.
Node* ToName(Node* context, Node* input);
// Convert a Non-Number object to a Number.
@@ -1233,7 +1253,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// ES6 7.1.15 ToLength, but with inlined fast path.
Node* ToLength_Inline(Node* const context, Node* const input);
- // Convert any object to an Integer.
+ // ES6 7.1.4 ToInteger ( argument )
+ TNode<Number> ToInteger_Inline(TNode<Context> context, TNode<Object> input,
+ ToIntegerTruncationMode mode = kNoTruncation);
TNode<Number> ToInteger(SloppyTNode<Context> context,
SloppyTNode<Object> input,
ToIntegerTruncationMode mode = kNoTruncation);
@@ -1248,22 +1270,23 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |BitField| in |word|. Returns result as a word-size node.
template <typename BitField>
- Node* DecodeWord(Node* word) {
+ TNode<UintPtrT> DecodeWord(SloppyTNode<WordT> word) {
return DecodeWord(word, BitField::kShift, BitField::kMask);
}
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |BitField| in |word32|. Returns result as a word-size node.
template <typename BitField>
- Node* DecodeWordFromWord32(Node* word32) {
+ TNode<UintPtrT> DecodeWordFromWord32(SloppyTNode<Word32T> word32) {
return DecodeWord<BitField>(ChangeUint32ToWord(word32));
}
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |BitField| in |word|. Returns result as an uint32 node.
template <typename BitField>
- Node* DecodeWord32FromWord(Node* word) {
- return TruncateWordToWord32(DecodeWord<BitField>(word));
+ TNode<Uint32T> DecodeWord32FromWord(SloppyTNode<WordT> word) {
+ return UncheckedCast<Uint32T>(
+ TruncateWordToWord32(Signed(DecodeWord<BitField>(word))));
}
// Decodes an unsigned (!) value from |word32| to an uint32 node.
@@ -1271,7 +1294,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
uint32_t mask);
// Decodes an unsigned (!) value from |word| to a word-size node.
- Node* DecodeWord(Node* word, uint32_t shift, uint32_t mask);
+ TNode<UintPtrT> DecodeWord(SloppyTNode<WordT> word, uint32_t shift,
+ uint32_t mask);
// Returns a node that contains the updated values of a |BitField|.
template <typename BitField>
@@ -1559,6 +1583,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* name_index, Variable* var_details,
Variable* var_value);
+ void LoadPropertyFromFastObject(Node* object, Node* map, Node* descriptors,
+ Node* name_index, Node* details,
+ Variable* var_value);
+
void LoadPropertyFromNameDictionary(Node* dictionary, Node* entry,
Variable* var_details,
Variable* var_value);
@@ -1600,6 +1628,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Label* if_bailout)>
LookupInHolder;
+ // For integer-indexed exotic objects: checks whether {name_string} could be
+ // a special index. Only a cheap check is performed, so branching to
+ // {if_maybe_special_index} does not guarantee that {name_string} really is
+ // a special index; it only means the possibility could not be ruled out
+ // cheaply.
+ void BranchIfMaybeSpecialIndex(TNode<String> name_string,
+ Label* if_maybe_special_index,
+ Label* if_not_special_index);
+
// Generic property prototype chain lookup generator.
// For properties it generates lookup using given {lookup_property_in_holder}
// and for elements it uses {lookup_element_in_holder}.
@@ -1635,9 +1672,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void ReportFeedbackUpdate(SloppyTNode<FeedbackVector> feedback_vector,
SloppyTNode<IntPtrT> slot_id, const char* reason);
- // Combine the new feedback with the existing_feedback.
+ // Combine the new feedback with the existing_feedback. Do nothing if
+ // existing_feedback is nullptr.
+ void CombineFeedback(Variable* existing_feedback, int feedback);
void CombineFeedback(Variable* existing_feedback, Node* feedback);
+ // Overwrite the existing feedback with new_feedback. Do nothing if
+ // existing_feedback is nullptr.
+ void OverwriteFeedback(Variable* existing_feedback, int new_feedback);
+
// Check if a property name might require protector invalidation when it is
// used for a property store or deletion.
void CheckForAssociatedProtector(Node* name, Label* if_protector);
@@ -1657,7 +1700,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
}
// Loads script context from the script context table.
- Node* LoadScriptContext(Node* context, int context_index);
+ TNode<Context> LoadScriptContext(TNode<Context> context,
+ TNode<IntPtrT> context_index);
Node* Int32ToUint8Clamped(Node* int32_value);
Node* Float64ToUint8Clamped(Node* float64_value);
@@ -1759,11 +1803,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void InitializeFieldsWithRoot(Node* object, Node* start_offset,
Node* end_offset, Heap::RootListIndex root);
- Node* RelationalComparison(Operation op, Node* lhs, Node* rhs, Node* context,
+ Node* RelationalComparison(Operation op, Node* left, Node* right,
+ Node* context,
Variable* var_type_feedback = nullptr);
- void BranchIfNumericRelationalComparison(Operation op, Node* lhs, Node* rhs,
- Label* if_true, Label* if_false);
+ void BranchIfNumberRelationalComparison(Operation op, Node* left, Node* right,
+ Label* if_true, Label* if_false);
void BranchIfAccessorPair(Node* value, Label* if_accessor_pair,
Label* if_not_accessor_pair) {
@@ -1771,7 +1816,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Branch(IsAccessorPair(value), if_accessor_pair, if_not_accessor_pair);
}
- void GotoIfNumericGreaterThanOrEqual(Node* lhs, Node* rhs, Label* if_false);
+ void GotoIfNumberGreaterThanOrEqual(Node* left, Node* right, Label* if_false);
Node* Equal(Node* lhs, Node* rhs, Node* context,
Variable* var_type_feedback = nullptr);
@@ -1786,8 +1831,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
enum HasPropertyLookupMode { kHasProperty, kForInHasProperty };
- Node* HasProperty(Node* object, Node* key, Node* context,
- HasPropertyLookupMode mode);
+ TNode<Oddball> HasProperty(SloppyTNode<HeapObject> object,
+ SloppyTNode<Name> key,
+ SloppyTNode<Context> context,
+ HasPropertyLookupMode mode);
Node* ClassOf(Node* object);
@@ -1845,7 +1892,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
UndefinedConstant(), SmiConstant(message), args...);
}
- void Abort(BailoutReason reason) {
+ void Abort(AbortReason reason) {
CallRuntime(Runtime::kAbort, NoContextConstant(), SmiConstant(reason));
Unreachable();
}
@@ -1862,11 +1909,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void DescriptorLookupBinary(Node* unique_name, Node* descriptors, Node* nof,
Label* if_found, Variable* var_name_index,
Label* if_not_found);
+ Node* DescriptorNumberToIndex(SloppyTNode<Uint32T> descriptor_number);
// Implements DescriptorArray::ToKeyIndex.
// Returns an untagged IntPtr.
Node* DescriptorArrayToKeyIndex(Node* descriptor_number);
// Implements DescriptorArray::GetKey.
Node* DescriptorArrayGetKey(Node* descriptors, Node* descriptor_number);
+ // Implements DescriptorArray::GetDetails.
+ TNode<Uint32T> DescriptorArrayGetDetails(TNode<DescriptorArray> descriptors,
+ TNode<Uint32T> descriptor_number);
Node* CallGetterIfAccessor(Node* value, Node* details, Node* context,
Node* receiver, Label* if_bailout,
@@ -1878,6 +1929,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Label* definitely_no_elements,
Label* possibly_elements);
+ void InitializeFunctionContext(Node* native_context, Node* context,
+ int slots);
+
private:
friend class CodeStubArguments;
@@ -1932,12 +1986,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* context, Node* input, Object::Conversion mode,
BigIntHandling bigint_handling = BigIntHandling::kThrow);
- enum class Feedback { kCollect, kNone };
- template <Feedback feedback>
void TaggedToNumeric(Node* context, Node* value, Label* done,
- Variable* var_numeric, Variable* var_feedback = nullptr);
+ Variable* var_numeric, Variable* var_feedback);
- template <Feedback feedback, Object::Conversion conversion>
+ template <Object::Conversion conversion>
void TaggedToWord32OrBigIntImpl(Node* context, Node* value, Label* if_number,
Variable* var_word32,
Label* if_bigint = nullptr,
@@ -1957,15 +2009,16 @@ class CodeStubArguments {
// |argc| is an intptr value which specifies the number of arguments passed
// to the builtin excluding the receiver. The arguments will include a
// receiver iff |receiver_mode| is kHasReceiver.
- CodeStubArguments(CodeStubAssembler* assembler, SloppyTNode<IntPtrT> argc,
+ CodeStubArguments(CodeStubAssembler* assembler, Node* argc,
ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
: CodeStubArguments(assembler, argc, nullptr,
CodeStubAssembler::INTPTR_PARAMETERS, receiver_mode) {
}
+
// |argc| is either a smi or intptr depending on |param_mode|. The arguments
// include a receiver iff |receiver_mode| is kHasReceiver.
- CodeStubArguments(CodeStubAssembler* assembler, SloppyTNode<IntPtrT> argc,
- Node* fp, CodeStubAssembler::ParameterMode param_mode,
+ CodeStubArguments(CodeStubAssembler* assembler, Node* argc, Node* fp,
+ CodeStubAssembler::ParameterMode param_mode,
ReceiverMode receiver_mode = ReceiverMode::kHasReceiver);
TNode<Object> GetReceiver() const;
@@ -1987,7 +2040,10 @@ class CodeStubArguments {
TNode<Object> GetOptionalArgumentValue(int index,
SloppyTNode<Object> default_value);
- TNode<IntPtrT> GetLength() const { return argc_; }
+ Node* GetLength(CodeStubAssembler::ParameterMode mode) const {
+ DCHECK_EQ(mode, argc_mode_);
+ return argc_;
+ }
typedef std::function<void(Node* arg)> ForEachBodyFunction;
@@ -2013,7 +2069,7 @@ class CodeStubArguments {
CodeStubAssembler* assembler_;
CodeStubAssembler::ParameterMode argc_mode_;
ReceiverMode receiver_mode_;
- TNode<IntPtrT> argc_;
+ Node* argc_;
TNode<RawPtr<Object>> arguments_;
Node* fp_;
};
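
The DecodeWord / DecodeWordFromWord32 / DecodeWord32FromWord changes above only tighten the return types to TNode<UintPtrT> / TNode<Uint32T>; the operation they emit is still a plain mask-and-shift over a BitField<Type, shift, size>. A minimal host-side sketch of that arithmetic (the names below are illustrative stand-ins, not V8's templates):

#include <cstdint>

// Stand-in for the BitField<Type, kShift, kSize> utility referenced above:
// the decoded value is (word & mask) >> shift, always treated as unsigned.
template <int kShift, int kSize>
struct BitFieldSketch {
  static constexpr uintptr_t kMask = ((uintptr_t{1} << kSize) - 1) << kShift;
  static constexpr uintptr_t Decode(uintptr_t word) {
    return (word & kMask) >> kShift;
  }
  static constexpr uintptr_t Update(uintptr_t word, uintptr_t value) {
    return (word & ~kMask) | ((value << kShift) & kMask);
  }
};

// A 5-bit field starting at bit 3 decodes 0xFF to 0x1F (all five bits set).
static_assert(BitFieldSketch<3, 5>::Decode(0xFF) == 0x1F, "mask and shift");
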
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 4b2bd1eaf4..2b98a5bfc7 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -404,30 +404,6 @@ TF_STUB(KeyedStoreSloppyArgumentsStub, CodeStubAssembler) {
}
}
-TF_STUB(LoadScriptContextFieldStub, CodeStubAssembler) {
- Comment("LoadScriptContextFieldStub: context_index=%d, slot=%d",
- stub->context_index(), stub->slot_index());
-
- Node* context = Parameter(Descriptor::kContext);
-
- Node* script_context = LoadScriptContext(context, stub->context_index());
- Node* result = LoadFixedArrayElement(script_context, stub->slot_index());
- Return(result);
-}
-
-TF_STUB(StoreScriptContextFieldStub, CodeStubAssembler) {
- Comment("StoreScriptContextFieldStub: context_index=%d, slot=%d",
- stub->context_index(), stub->slot_index());
-
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
-
- Node* script_context = LoadScriptContext(context, stub->context_index());
- StoreFixedArrayElement(script_context, IntPtrConstant(stub->slot_index()),
- value);
- Return(value);
-}
-
// TODO(ishell): move to builtins-handler-gen.
TF_STUB(StoreInterceptorStub, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -640,7 +616,7 @@ void ArrayConstructorAssembler::GenerateConstructor(
Branch(SmiEqual(array_size, SmiConstant(0)), &small_smi_size, &abort);
BIND(&abort);
- Node* reason = SmiConstant(kAllocatingNonEmptyPackedArray);
+ Node* reason = SmiConstant(AbortReason::kAllocatingNonEmptyPackedArray);
TailCallRuntime(Runtime::kAbort, context, reason);
} else {
int element_size =
@@ -701,23 +677,6 @@ TF_STUB(InternalArraySingleArgumentConstructorStub, ArrayConstructorAssembler) {
stub->elements_kind(), DONT_TRACK_ALLOCATION_SITE);
}
-TF_STUB(GrowArrayElementsStub, CodeStubAssembler) {
- Label runtime(this, CodeStubAssembler::Label::kDeferred);
-
- Node* object = Parameter(Descriptor::kObject);
- Node* key = Parameter(Descriptor::kKey);
- Node* context = Parameter(Descriptor::kContext);
- ElementsKind kind = stub->elements_kind();
-
- Node* elements = LoadElements(object);
- Node* new_elements =
- TryGrowElementsCapacity(object, elements, kind, key, &runtime);
- Return(new_elements);
-
- BIND(&runtime);
- TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
-}
-
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 76057ffcc2..751a89fdbd 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -26,36 +26,33 @@ class Node;
}
// List of code stubs used on all platforms.
-#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
- /* --- PlatformCodeStubs --- */ \
- V(ArrayConstructor) \
- V(CallApiCallback) \
- V(CallApiGetter) \
- V(CEntry) \
- V(DoubleToI) \
- V(InternalArrayConstructor) \
- V(JSEntry) \
- V(MathPow) \
- V(ProfileEntryHook) \
- V(StoreSlowElement) \
- /* --- TurboFanCodeStubs --- */ \
- V(ArrayNoArgumentConstructor) \
- V(ArraySingleArgumentConstructor) \
- V(ArrayNArgumentsConstructor) \
- V(InternalArrayNoArgumentConstructor) \
- V(InternalArraySingleArgumentConstructor) \
- V(ElementsTransitionAndStore) \
- V(KeyedLoadSloppyArguments) \
- V(KeyedStoreSloppyArguments) \
- V(LoadScriptContextField) \
- V(StoreScriptContextField) \
- V(StringAdd) \
- V(GetProperty) \
- V(StoreFastElement) \
- V(StoreInterceptor) \
- V(TransitionElementsKind) \
- V(LoadIndexedInterceptor) \
- V(GrowArrayElements)
+#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
+ /* --- PlatformCodeStubs --- */ \
+ V(ArrayConstructor) \
+ V(CallApiCallback) \
+ V(CallApiGetter) \
+ V(CEntry) \
+ V(DoubleToI) \
+ V(InternalArrayConstructor) \
+ V(JSEntry) \
+ V(MathPow) \
+ V(ProfileEntryHook) \
+ V(StoreSlowElement) \
+ /* --- TurboFanCodeStubs --- */ \
+ V(ArrayNoArgumentConstructor) \
+ V(ArraySingleArgumentConstructor) \
+ V(ArrayNArgumentsConstructor) \
+ V(InternalArrayNoArgumentConstructor) \
+ V(InternalArraySingleArgumentConstructor) \
+ V(ElementsTransitionAndStore) \
+ V(KeyedLoadSloppyArguments) \
+ V(KeyedStoreSloppyArguments) \
+ V(StringAdd) \
+ V(GetProperty) \
+ V(StoreFastElement) \
+ V(StoreInterceptor) \
+ V(TransitionElementsKind) \
+ V(LoadIndexedInterceptor)
// List of code stubs only used on ARM 32 bits platforms.
#if V8_TARGET_ARCH_ARM
@@ -493,23 +490,6 @@ class GetPropertyStub : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(GetProperty, TurboFanCodeStub);
};
-class GrowArrayElementsStub : public TurboFanCodeStub {
- public:
- GrowArrayElementsStub(Isolate* isolate, ElementsKind kind)
- : TurboFanCodeStub(isolate) {
- minor_key_ = ElementsKindBits::encode(GetHoleyElementsKind(kind));
- }
-
- ElementsKind elements_kind() const {
- return ElementsKindBits::decode(minor_key_);
- }
-
- private:
- class ElementsKindBits : public BitField<ElementsKind, 0, 8> {};
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(GrowArrayElements);
- DEFINE_TURBOFAN_CODE_STUB(GrowArrayElements, TurboFanCodeStub);
-};
enum AllocationSiteOverrideMode {
DONT_OVERRIDE,
@@ -602,7 +582,7 @@ class KeyedStoreSloppyArgumentsStub : public TurboFanCodeStub {
class CallApiCallbackStub : public PlatformCodeStub {
public:
- static const int kArgBits = 3;
+ static const int kArgBits = 7;
static const int kArgMax = (1 << kArgBits) - 1;
CallApiCallbackStub(Isolate* isolate, int argc)
@@ -697,10 +677,18 @@ class CEntryStub : public PlatformCodeStub {
class JSEntryStub : public PlatformCodeStub {
public:
+ enum class SpecialTarget { kNone, kRunMicrotasks };
JSEntryStub(Isolate* isolate, StackFrame::Type type)
: PlatformCodeStub(isolate) {
DCHECK(type == StackFrame::ENTRY || type == StackFrame::CONSTRUCT_ENTRY);
- minor_key_ = StackFrameTypeBits::encode(type);
+ minor_key_ = StackFrameTypeBits::encode(type) |
+ SpecialTargetBits::encode(SpecialTarget::kNone);
+ }
+
+ JSEntryStub(Isolate* isolate, SpecialTarget target)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = StackFrameTypeBits::encode(StackFrame::ENTRY) |
+ SpecialTargetBits::encode(target);
}
private:
@@ -715,7 +703,26 @@ class JSEntryStub : public PlatformCodeStub {
return StackFrameTypeBits::decode(minor_key_);
}
+ SpecialTarget special_target() const {
+ return SpecialTargetBits::decode(minor_key_);
+ }
+
+ Handle<Code> EntryTrampoline() {
+ switch (special_target()) {
+ case SpecialTarget::kNone:
+ return (type() == StackFrame::CONSTRUCT_ENTRY)
+ ? BUILTIN_CODE(isolate(), JSConstructEntryTrampoline)
+ : BUILTIN_CODE(isolate(), JSEntryTrampoline);
+ case SpecialTarget::kRunMicrotasks:
+ return BUILTIN_CODE(isolate(), RunMicrotasks);
+ }
+ UNREACHABLE();
+ return Handle<Code>();
+ }
+
class StackFrameTypeBits : public BitField<StackFrame::Type, 0, 5> {};
+ class SpecialTargetBits
+ : public BitField<SpecialTarget, StackFrameTypeBits::kNext, 1> {};
int handler_offset_;
@@ -766,59 +773,6 @@ class DoubleToIStub : public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(DoubleToI, PlatformCodeStub);
};
-class ScriptContextFieldStub : public TurboFanCodeStub {
- public:
- ScriptContextFieldStub(Isolate* isolate,
- const ScriptContextTable::LookupResult* lookup_result)
- : TurboFanCodeStub(isolate) {
- DCHECK(Accepted(lookup_result));
- minor_key_ = ContextIndexBits::encode(lookup_result->context_index) |
- SlotIndexBits::encode(lookup_result->slot_index);
- }
-
- int context_index() const { return ContextIndexBits::decode(minor_key_); }
-
- int slot_index() const { return SlotIndexBits::decode(minor_key_); }
-
- static bool Accepted(const ScriptContextTable::LookupResult* lookup_result) {
- return ContextIndexBits::is_valid(lookup_result->context_index) &&
- SlotIndexBits::is_valid(lookup_result->slot_index);
- }
-
- private:
- static const int kContextIndexBits = 9;
- static const int kSlotIndexBits = 12;
- class ContextIndexBits : public BitField<int, 0, kContextIndexBits> {};
- class SlotIndexBits
- : public BitField<int, kContextIndexBits, kSlotIndexBits> {};
-
- DEFINE_CODE_STUB_BASE(ScriptContextFieldStub, TurboFanCodeStub);
-};
-
-
-class LoadScriptContextFieldStub : public ScriptContextFieldStub {
- public:
- LoadScriptContextFieldStub(
- Isolate* isolate, const ScriptContextTable::LookupResult* lookup_result)
- : ScriptContextFieldStub(isolate, lookup_result) {}
-
- private:
- DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
- DEFINE_TURBOFAN_CODE_STUB(LoadScriptContextField, ScriptContextFieldStub);
-};
-
-
-class StoreScriptContextFieldStub : public ScriptContextFieldStub {
- public:
- StoreScriptContextFieldStub(
- Isolate* isolate, const ScriptContextTable::LookupResult* lookup_result)
- : ScriptContextFieldStub(isolate, lookup_result) {}
-
- private:
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
- DEFINE_TURBOFAN_CODE_STUB(StoreScriptContextField, ScriptContextFieldStub);
-};
-
class StoreFastElementStub : public TurboFanCodeStub {
public:
StoreFastElementStub(Isolate* isolate, bool is_js_array,
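
The JSEntryStub change above packs a new one-bit SpecialTarget field into minor_key_ directly after the five-bit stack-frame-type field (BitField::kNext being the previous field's shift plus its size). A simplified round-trip of that packing, with widths hard-coded to match the diff (illustrative only, not the real BitField classes):

#include <cstdint>

constexpr uint32_t EncodeMinorKeySketch(uint32_t frame_type,      // 5 bits
                                        uint32_t special_target)  // 1 bit
{
  return (frame_type & 0x1F) | ((special_target & 0x1) << 5);
}

constexpr uint32_t DecodeSpecialTargetSketch(uint32_t minor_key) {
  return (minor_key >> 5) & 0x1;
}

static_assert(DecodeSpecialTargetSketch(EncodeMinorKeySketch(3, 1)) == 1,
              "SpecialTarget occupies the bit just above StackFrameTypeBits");
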
diff --git a/deps/v8/src/compilation-info.cc b/deps/v8/src/compilation-info.cc
index b722cc4e5c..27e6dbb9da 100644
--- a/deps/v8/src/compilation-info.cc
+++ b/deps/v8/src/compilation-info.cc
@@ -74,7 +74,7 @@ CompilationInfo::CompilationInfo(Vector<const char> debug_name,
zone_(zone),
deferred_handles_(nullptr),
dependencies_(nullptr),
- bailout_reason_(kNoReason),
+ bailout_reason_(BailoutReason::kNoReason),
parameter_count_(0),
optimization_id_(-1),
debug_name_(debug_name) {}
diff --git a/deps/v8/src/compilation-info.h b/deps/v8/src/compilation-info.h
index e0f5c73a9c..bb5812002e 100644
--- a/deps/v8/src/compilation-info.h
+++ b/deps/v8/src/compilation-info.h
@@ -211,13 +211,13 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
void ReopenHandlesInNewHandleScope();
void AbortOptimization(BailoutReason reason) {
- DCHECK_NE(reason, kNoReason);
- if (bailout_reason_ == kNoReason) bailout_reason_ = reason;
+ DCHECK_NE(reason, BailoutReason::kNoReason);
+ if (bailout_reason_ == BailoutReason::kNoReason) bailout_reason_ = reason;
SetFlag(kDisableFutureOptimization);
}
void RetryOptimization(BailoutReason reason) {
- DCHECK_NE(reason, kNoReason);
+ DCHECK_NE(reason, BailoutReason::kNoReason);
if (GetFlag(kDisableFutureOptimization)) return;
bailout_reason_ = reason;
}
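
The change from kNoReason to BailoutReason::kNoReason in this file (and the matching AbortReason:: qualifications elsewhere in the patch) appears to follow from turning the flat reason enums into scoped enums, so every value must now be qualified and no longer converts implicitly to int. A minimal sketch of the pattern, with hypothetical names:

#include <cstdint>

// Before: plain enum -- kNoReason leaks into the enclosing scope.
enum OldBailoutReasonSketch { kOldNoReason, kOldFunctionBeingDebugged };

// After: enum class -- uses must be qualified, as the diff above now does.
enum class BailoutReasonSketch : uint8_t { kNoReason, kFunctionBeingDebugged };

inline bool ShouldDisableOptimizationSketch(BailoutReasonSketch reason) {
  return reason != BailoutReasonSketch::kNoReason;  // qualification required
}
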
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
index e365e301d1..1adfd090cd 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -305,7 +305,7 @@ void CompilerDispatcher::WaitForJobIfRunningOnBackground(
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompilerDispatcherWaitForBackgroundJob");
RuntimeCallTimerScope runtimeTimer(
- isolate_, &RuntimeCallStats::CompileWaitForDispatcher);
+ isolate_, RuntimeCallCounterId::kCompileWaitForDispatcher);
base::LockGuard<base::Mutex> lock(&mutex_);
if (running_background_jobs_.find(job) == running_background_jobs_.end()) {
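
The change from &RuntimeCallStats::CompileWaitForDispatcher to RuntimeCallCounterId::kCompileWaitForDispatcher here, repeated throughout compiler.cc below, switches the timer scope from a pointer-to-member counter to an enum id. A rough sketch of the new call shape, using stand-in types rather than V8's real RuntimeCallStats machinery:

struct RuntimeCallStatsSketch {};  // stand-in; the real class keeps per-id timers

enum class RuntimeCallCounterIdSketch {
  kCompileScript,
  kCompileEval,
  kCompileWaitForDispatcher,
};

class RuntimeCallTimerScopeSketch {
 public:
  RuntimeCallTimerScopeSketch(RuntimeCallStatsSketch* stats,
                              RuntimeCallCounterIdSketch id)
      : stats_(stats), id_(id) {}  // would start timing counter |id|
  ~RuntimeCallTimerScopeSketch() {}  // would stop timing and attribute to |id|

 private:
  RuntimeCallStatsSketch* stats_;
  RuntimeCallCounterIdSketch id_;
};
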
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index e508f5a5a7..e2f8ee0f39 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -114,8 +114,8 @@ CompilationJob::Status CompilationJob::FinalizeJob(Isolate* isolate) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
DisallowCodeDependencyChange no_dependency_change;
DisallowJavascriptExecution no_js(isolate);
- DCHECK(!compilation_info()->dependencies() ||
- !compilation_info()->dependencies()->HasAborted());
+ CHECK(!compilation_info()->dependencies() ||
+ !compilation_info()->dependencies()->HasAborted());
// Delegate to the underlying implementation.
DCHECK_EQ(state(), State::kReadyToFinalize);
@@ -340,7 +340,7 @@ void SetSharedFunctionFlagsFromLiteral(FunctionLiteral* literal,
shared_info->set_has_duplicate_parameters(
literal->has_duplicate_parameters());
shared_info->SetExpectedNofPropertiesFromEstimate(literal);
- if (literal->dont_optimize_reason() != kNoReason) {
+ if (literal->dont_optimize_reason() != BailoutReason::kNoReason) {
shared_info->DisableOptimization(literal->dont_optimize_reason());
}
}
@@ -375,8 +375,8 @@ bool Renumber(ParseInfo* parse_info,
RuntimeCallTimerScope runtimeTimer(
parse_info->runtime_call_stats(),
parse_info->on_background_thread()
- ? &RuntimeCallStats::CompileBackgroundRenumber
- : &RuntimeCallStats::CompileRenumber);
+ ? RuntimeCallCounterId::kCompileBackgroundRenumber
+ : RuntimeCallCounterId::kCompileRenumber);
return AstNumbering::Renumber(parse_info->stack_limit(), parse_info->zone(),
parse_info->literal(), eager_literals);
}
@@ -487,7 +487,7 @@ MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
Handle<JSFunction> function, BailoutId osr_offset) {
RuntimeCallTimerScope runtimeTimer(
function->GetIsolate(),
- &RuntimeCallStats::CompileGetFromOptimizedCodeMap);
+ RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);
Handle<SharedFunctionInfo> shared(function->shared());
DisallowHeapAllocation no_gc;
if (osr_offset.IsNone()) {
@@ -513,7 +513,7 @@ void ClearOptimizedCodeCache(CompilationInfo* compilation_info) {
if (compilation_info->osr_offset().IsNone()) {
Handle<FeedbackVector> vector =
handle(function->feedback_vector(), function->GetIsolate());
- vector->ClearOptimizedCode();
+ vector->ClearOptimizationMarker();
}
}
@@ -543,8 +543,8 @@ void InsertCodeIntoOptimizedCodeCache(CompilationInfo* compilation_info) {
bool GetOptimizedCodeNow(CompilationJob* job, Isolate* isolate) {
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
- RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::RecompileSynchronous);
+ RuntimeCallTimerScope runtimeTimer(
+ isolate, RuntimeCallCounterId::kRecompileSynchronous);
CompilationInfo* compilation_info = job->compilation_info();
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.RecompileSynchronous");
@@ -590,8 +590,8 @@ bool GetOptimizedCodeLater(CompilationJob* job, Isolate* isolate) {
}
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
- RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::RecompileSynchronous);
+ RuntimeCallTimerScope runtimeTimer(
+ isolate, RuntimeCallCounterId::kRecompileSynchronous);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.RecompileSynchronous");
@@ -653,26 +653,29 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// Do not use TurboFan if we need to be able to set break points.
if (compilation_info->shared_info()->HasBreakInfo()) {
- compilation_info->AbortOptimization(kFunctionBeingDebugged);
+ compilation_info->AbortOptimization(BailoutReason::kFunctionBeingDebugged);
return MaybeHandle<Code>();
}
// Do not use TurboFan when %NeverOptimizeFunction was applied.
if (shared->optimization_disabled() &&
- shared->disable_optimization_reason() == kOptimizationDisabledForTest) {
- compilation_info->AbortOptimization(kOptimizationDisabledForTest);
+ shared->disable_optimization_reason() ==
+ BailoutReason::kOptimizationDisabledForTest) {
+ compilation_info->AbortOptimization(
+ BailoutReason::kOptimizationDisabledForTest);
return MaybeHandle<Code>();
}
// Do not use TurboFan if optimization is disabled or function doesn't pass
// turbo_filter.
if (!FLAG_opt || !shared->PassesFilter(FLAG_turbo_filter)) {
- compilation_info->AbortOptimization(kOptimizationDisabled);
+ compilation_info->AbortOptimization(BailoutReason::kOptimizationDisabled);
return MaybeHandle<Code>();
}
TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
- RuntimeCallTimerScope runtimeTimer(isolate, &RuntimeCallStats::OptimizeCode);
+ RuntimeCallTimerScope runtimeTimer(isolate,
+ RuntimeCallCounterId::kOptimizeCode);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.OptimizeCode");
// In case of concurrent recompilation, all handles below this point will be
@@ -716,8 +719,8 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job,
CompilationInfo* compilation_info = job->compilation_info();
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
- RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::RecompileSynchronous);
+ RuntimeCallTimerScope runtimeTimer(
+ isolate, RuntimeCallCounterId::kRecompileSynchronous);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.RecompileSynchronous");
@@ -735,9 +738,9 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job,
// 4) Code generation may have failed.
if (job->state() == CompilationJob::State::kReadyToFinalize) {
if (shared->optimization_disabled()) {
- job->RetryOptimization(kOptimizationDisabled);
+ job->RetryOptimization(BailoutReason::kOptimizationDisabled);
} else if (compilation_info->dependencies()->HasAborted()) {
- job->RetryOptimization(kBailedOutDueToDependencyChange);
+ job->RetryOptimization(BailoutReason::kBailedOutDueToDependencyChange);
} else if (job->FinalizeJob(isolate) == CompilationJob::SUCCEEDED) {
job->RecordOptimizedCompilationStats();
job->RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
@@ -809,8 +812,8 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(ParseInfo* parse_info,
PostponeInterruptsScope postpone(isolate);
DCHECK(!isolate->native_context().is_null());
RuntimeCallTimerScope runtimeTimer(
- isolate, parse_info->is_eval() ? &RuntimeCallStats::CompileEval
- : &RuntimeCallStats::CompileScript);
+ isolate, parse_info->is_eval() ? RuntimeCallCounterId::kCompileEval
+ : RuntimeCallCounterId::kCompileScript);
VMState<BYTECODE_COMPILER> state(isolate);
if (parse_info->literal() == nullptr &&
!parsing::ParseProgram(parse_info, isolate)) {
@@ -860,8 +863,8 @@ bool Compiler::Analyze(ParseInfo* parse_info,
RuntimeCallTimerScope runtimeTimer(
parse_info->runtime_call_stats(),
parse_info->on_background_thread()
- ? &RuntimeCallStats::CompileBackgroundAnalyse
- : &RuntimeCallStats::CompileAnalyse);
+ ? RuntimeCallCounterId::kCompileBackgroundAnalyse
+ : RuntimeCallCounterId::kCompileAnalyse);
if (!Rewriter::Rewrite(parse_info)) return false;
DeclarationScope::Analyze(parse_info);
if (!Renumber(parse_info, eager_literals)) return false;
@@ -890,7 +893,7 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
PostponeInterruptsScope postpone(isolate);
TimerEventScope<TimerEventCompileCode> compile_timer(isolate);
RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::CompileFunction);
+ RuntimeCallCounterId::kCompileFunction);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
@@ -1174,17 +1177,59 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
return result;
}
-namespace {
+MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
+ Handle<String> source, Handle<FixedArray> arguments,
+ Handle<Context> context, int line_offset, int column_offset,
+ Handle<Object> script_name, ScriptOriginOptions options) {
+ Isolate* isolate = source->GetIsolate();
+ int source_length = source->length();
+ isolate->counters()->total_compile_size()->Increment(source_length);
-bool ContainsAsmModule(Handle<Script> script) {
- DisallowHeapAllocation no_gc;
- SharedFunctionInfo::ScriptIterator iter(script);
- while (SharedFunctionInfo* info = iter.Next()) {
- if (info->HasAsmWasmData()) return true;
+ Handle<Script> script = isolate->factory()->NewScript(source);
+ if (isolate->NeedsSourcePositionsForProfiling()) {
+ Script::InitLineEnds(script);
}
- return false;
+ if (!script_name.is_null()) {
+ script->set_name(*script_name);
+ script->set_line_offset(line_offset);
+ script->set_column_offset(column_offset);
+ }
+ script->set_wrapped_arguments(*arguments);
+ script->set_origin_options(options);
+
+ ParseInfo parse_info(script);
+ parse_info.set_eval(); // Use an eval scope as declaration scope.
+ parse_info.set_wrapped_as_function();
+ if (!context->IsNativeContext()) {
+ parse_info.set_outer_scope_info(handle(context->scope_info()));
+ }
+
+ Handle<SharedFunctionInfo> top_level;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, top_level,
+ CompileToplevel(&parse_info, isolate), JSFunction);
+
+ Handle<JSFunction> top_level_fun =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(top_level, context,
+ NOT_TENURED);
+
+ // TODO(yangguo): consider not having to call the top-level function, and
+ // instead instantiate the wrapper function directly.
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, top_level_fun, isolate->global_proxy(), 0,
+ nullptr),
+ JSFunction);
+
+ // OnAfterCompile has to be called after we create the JSFunction, since we
+ // may need it to recompile the eval for debugging if we find a function
+ // that contains break points in the eval script.
+ isolate->debug()->OnAfterCompile(script);
+ return Handle<JSFunction>::cast(result);
}
+namespace {
+
bool ShouldProduceCodeCache(ScriptCompiler::CompileOptions options) {
return options == ScriptCompiler::kProduceCodeCache ||
options == ScriptCompiler::kProduceFullCodeCache;
@@ -1369,6 +1414,13 @@ struct ScriptCompileTimerScope {
return CacheBehaviour::kNoCacheBecauseInDocumentWrite;
case ScriptCompiler::kNoCacheBecauseResourceWithNoCacheHandler:
return CacheBehaviour::kNoCacheBecauseResourceWithNoCacheHandler;
+ case ScriptCompiler::kNoCacheBecauseDeferredProduceCodeCache: {
+ if (hit_isolate_cache_) {
+ return CacheBehaviour::kHitIsolateCacheWhenProduceCodeCache;
+ } else {
+ return CacheBehaviour::kProduceCodeCache;
+ }
+ }
}
UNREACHABLE();
}
@@ -1438,7 +1490,8 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
Isolate* isolate = source->GetIsolate();
ScriptCompileTimerScope compile_timer(isolate, no_cache_reason);
- if (compile_options == ScriptCompiler::kNoCompileOptions) {
+ if (compile_options == ScriptCompiler::kNoCompileOptions ||
+ compile_options == ScriptCompiler::kEagerCompile) {
cached_data = nullptr;
} else if (compile_options == ScriptCompiler::kProduceParserCache ||
ShouldProduceCodeCache(compile_options)) {
@@ -1477,8 +1530,8 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
compile_timer.set_consuming_code_cache();
// Then check cached code provided by embedder.
HistogramTimerScope timer(isolate->counters()->compile_deserialize());
- RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::CompileDeserialize);
+ RuntimeCallTimerScope runtimeTimer(
+ isolate, RuntimeCallCounterId::kCompileDeserialize);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileDeserialize");
Handle<SharedFunctionInfo> inner_result;
@@ -1493,6 +1546,9 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
inner_result, vector);
Handle<Script> script(Script::cast(inner_result->script()), isolate);
isolate->debug()->OnAfterCompile(script);
+ if (isolate->NeedsSourcePositionsForProfiling()) {
+ Script::InitLineEnds(script);
+ }
return inner_result;
}
// Deserializer failed. Fall through to compile.
@@ -1556,8 +1612,9 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
if (!context->IsNativeContext()) {
parse_info.set_outer_scope_info(handle(context->scope_info()));
}
- parse_info.set_eager(compile_options ==
- ScriptCompiler::kProduceFullCodeCache);
+ parse_info.set_eager(
+ (compile_options == ScriptCompiler::kProduceFullCodeCache) ||
+ (compile_options == ScriptCompiler::kEagerCompile));
parse_info.set_language_mode(
stricter_language_mode(parse_info.language_mode(), language_mode));
@@ -1572,13 +1629,13 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
compilation_cache->PutScript(source, context, language_mode, result,
vector);
if (ShouldProduceCodeCache(compile_options) &&
- !ContainsAsmModule(script)) {
+ !script->ContainsAsmModule()) {
compile_timer.set_producing_code_cache();
HistogramTimerScope histogram_timer(
isolate->counters()->compile_serialize());
- RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::CompileSerialize);
+ RuntimeCallTimerScope runtimeTimer(
+ isolate, RuntimeCallCounterId::kCompileSerialize);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileSerialize");
*cached_data = CodeSerializer::Serialize(isolate, result, source);
@@ -1610,8 +1667,8 @@ std::unique_ptr<CompilationJob> Compiler::CompileTopLevelOnBackgroundThread(
"V8.CompileCodeBackground");
RuntimeCallTimerScope runtimeTimer(
parse_info->runtime_call_stats(),
- parse_info->is_eval() ? &RuntimeCallStats::CompileBackgroundEval
- : &RuntimeCallStats::CompileBackgroundScript);
+ parse_info->is_eval() ? RuntimeCallCounterId::kCompileBackgroundEval
+ : RuntimeCallCounterId::kCompileBackgroundScript);
LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
parse_info->set_language_mode(
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index cc63697221..b84134c14e 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -101,6 +101,14 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
int column_offset = 0, Handle<Object> script_name = Handle<Object>(),
ScriptOriginOptions options = ScriptOriginOptions());
+ // Create a function that results from wrapping |source| in a function,
+ // with |arguments| being a list of parameters for that function.
+ MUST_USE_RESULT static MaybeHandle<JSFunction> GetWrappedFunction(
+ Handle<String> source, Handle<FixedArray> arguments,
+ Handle<Context> context, int line_offset = 0, int column_offset = 0,
+ Handle<Object> script_name = Handle<Object>(),
+ ScriptOriginOptions options = ScriptOriginOptions());
+
// Returns true if the embedder permits compiling the given source string in
// the given context.
static bool CodeGenerationFromStringsAllowed(Isolate* isolate,
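
Compiler::GetWrappedFunction, declared above and implemented in compiler.cc earlier in this patch, compiles |source| as the body of a function whose parameters are the names in |arguments|. The engine does this by parsing the script in a wrapped mode (parse_info.set_wrapped_as_function()) rather than by string concatenation, but conceptually the result corresponds to the following source-to-source transformation (hypothetical helper, not V8 code):

#include <string>
#include <vector>

// WrapSourceSketch("return a + b", {"a", "b"})
//   -> "function (a, b) { return a + b }"
std::string WrapSourceSketch(const std::string& source,
                             const std::vector<std::string>& args) {
  std::string params;
  for (size_t i = 0; i < args.size(); ++i) {
    if (i > 0) params += ", ";
    params += args[i];
  }
  return "function (" + params + ") { " + source + " }";
}
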
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index b63f5431e2..2e9052e0c3 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -3,13 +3,16 @@ set noparent
bmeurer@chromium.org
jarin@chromium.org
mstarzinger@chromium.org
-mtrofin@chromium.org
titzer@chromium.org
danno@chromium.org
tebbi@chromium.org
neis@chromium.org
mvstanton@chromium.org
+# For backend
+bbudge@chromium.org
+mtrofin@chromium.org
+
per-file wasm-*=ahaas@chromium.org
per-file wasm-*=bbudge@chromium.org
per-file wasm-*=bradnelson@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index ac4fc4363b..13d6801c32 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -52,6 +52,14 @@ FieldAccess AccessBuilder::ForHeapNumberValue() {
return access;
}
+// static
+FieldAccess AccessBuilder::ForBigIntBitfield() {
+ FieldAccess access = {
+ kTaggedBase, BigInt::kBitfieldOffset, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get().kInt32, MachineType::IntPtr(),
+ kNoWriteBarrier};
+ return access;
+}
// static
FieldAccess AccessBuilder::ForJSObjectPropertiesOrHash() {
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index e348c0f71b..a2ce1f800b 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -38,6 +38,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to HeapNumber::value() field.
static FieldAccess ForHeapNumberValue();
+ // Provides access to BigInt's bit field.
+ static FieldAccess ForBigIntBitfield();
+
// Provides access to JSObject::properties() field.
static FieldAccess ForJSObjectPropertiesOrHash();
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index 1a66e5b7d4..a238cf29d4 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -152,49 +152,6 @@ class ArmOperandConverter final : public InstructionOperandConverter {
namespace {
-class OutOfLineLoadFloat final : public OutOfLineCode {
- public:
- OutOfLineLoadFloat(CodeGenerator* gen, SwVfpRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- // Compute sqrtf(-1.0f), which results in a quiet single-precision NaN.
- __ vmov(result_, Float32(-1.0f));
- __ vsqrt(result_, result_);
- }
-
- private:
- SwVfpRegister const result_;
-};
-
-class OutOfLineLoadDouble final : public OutOfLineCode {
- public:
- OutOfLineLoadDouble(CodeGenerator* gen, DwVfpRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- // Compute sqrt(-1.0), which results in a quiet double-precision NaN.
- __ vmov(result_, Double(-1.0));
- __ vsqrt(result_, result_);
- }
-
- private:
- DwVfpRegister const result_;
-};
-
-
-class OutOfLineLoadInteger final : public OutOfLineCode {
- public:
- OutOfLineLoadInteger(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ mov(result_, Operand::Zero()); }
-
- private:
- Register const result_;
-};
-
-
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
@@ -359,64 +316,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} // namespace
-#define ASSEMBLE_CHECKED_LOAD_FP(Type) \
- do { \
- auto result = i.Output##Type##Register(); \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- auto ool = new (zone()) OutOfLineLoad##Type(this, result); \
- __ b(hs, ool->entry()); \
- __ vldr(result, i.InputOffset(2)); \
- __ bind(ool->exit()); \
- DCHECK_EQ(LeaveCC, i.OutputSBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
- __ b(hs, ool->entry()); \
- __ asm_instr(result, i.InputOffset(2)); \
- __ bind(ool->exit()); \
- DCHECK_EQ(LeaveCC, i.OutputSBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FP(Type) \
- do { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- auto value = i.Input##Type##Register(2); \
- __ vstr(value, i.InputOffset(3), lo); \
- DCHECK_EQ(LeaveCC, i.OutputSBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- auto value = i.InputRegister(2); \
- __ asm_instr(value, i.InputOffset(3), lo); \
- DCHECK_EQ(LeaveCC, i.OutputSBit()); \
- } while (0)
-
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
do { \
__ asm_instr(i.OutputRegister(), \
@@ -432,51 +331,51 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
__ dmb(ISH); \
} while (0)
-#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
- do { \
- Label exchange; \
- __ add(i.InputRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ dmb(ISH); \
- __ bind(&exchange); \
- __ load_instr(i.OutputRegister(0), i.InputRegister(0)); \
- __ store_instr(i.TempRegister(0), i.InputRegister(2), i.InputRegister(0)); \
- __ teq(i.TempRegister(0), Operand(0)); \
- __ b(ne, &exchange); \
- __ dmb(ISH); \
- } while (0)
-
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr) \
- do { \
- Label compareExchange; \
- Label exit; \
- __ add(i.InputRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ dmb(ISH); \
- __ bind(&compareExchange); \
- __ load_instr(i.OutputRegister(0), i.InputRegister(0)); \
- __ teq(i.InputRegister(2), Operand(i.OutputRegister(0))); \
- __ b(ne, &exit); \
- __ store_instr(i.TempRegister(0), i.InputRegister(3), i.InputRegister(0)); \
- __ teq(i.TempRegister(0), Operand(0)); \
- __ b(ne, &compareExchange); \
- __ bind(&exit); \
- __ dmb(ISH); \
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
+ do { \
+ Label exchange; \
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1)); \
+ __ dmb(ISH); \
+ __ bind(&exchange); \
+ __ load_instr(i.OutputRegister(0), i.TempRegister(1)); \
+ __ store_instr(i.TempRegister(0), i.InputRegister(2), i.TempRegister(1)); \
+ __ teq(i.TempRegister(0), Operand(0)); \
+ __ b(ne, &exchange); \
+ __ dmb(ISH); \
} while (0)
-#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr) \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr, \
+ cmp_reg) \
do { \
- Label binop; \
- __ add(i.InputRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ Label compareExchange; \
+ Label exit; \
__ dmb(ISH); \
- __ bind(&binop); \
- __ load_instr(i.OutputRegister(0), i.InputRegister(0)); \
- __ bin_instr(i.TempRegister(0), i.OutputRegister(0), \
- Operand(i.InputRegister(2))); \
- __ store_instr(i.TempRegister(1), i.TempRegister(0), i.InputRegister(0)); \
- __ teq(i.TempRegister(1), Operand(0)); \
- __ b(ne, &binop); \
+ __ bind(&compareExchange); \
+ __ load_instr(i.OutputRegister(0), i.TempRegister(1)); \
+ __ teq(cmp_reg, Operand(i.OutputRegister(0))); \
+ __ b(ne, &exit); \
+ __ store_instr(i.TempRegister(0), i.InputRegister(3), i.TempRegister(1)); \
+ __ teq(i.TempRegister(0), Operand(0)); \
+ __ b(ne, &compareExchange); \
+ __ bind(&exit); \
__ dmb(ISH); \
} while (0)
+#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr) \
+ do { \
+ Label binop; \
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1)); \
+ __ dmb(ISH); \
+ __ bind(&binop); \
+ __ load_instr(i.OutputRegister(0), i.TempRegister(1)); \
+ __ bin_instr(i.TempRegister(0), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ store_instr(i.TempRegister(2), i.TempRegister(0), i.TempRegister(1)); \
+ __ teq(i.TempRegister(2), Operand(0)); \
+ __ b(ne, &binop); \
+ __ dmb(ISH); \
+ } while (0)
+
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
@@ -675,17 +574,18 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. load the address of the current instruction;
+// 1. compute the offset of the {CodeDataContainer} from our current location
+//    and load it;
// 2. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
// 3. test kMarkedForDeoptimizationBit in those flags; and
// 4. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
int pc_offset = __ pc_offset();
- int offset =
- Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc_offset + 8);
+ int offset = Code::kCodeDataContainerOffset -
+ (Code::kHeaderSize + pc_offset + TurboAssembler::kPcLoadDelta);
// We can use the register pc - 8 for the address of the current instruction.
- __ ldr(ip, MemOperand(pc, offset));
+ __ ldr_pcrel(ip, offset);
__ ldr(ip, FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
__ tst(ip, Operand(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
@@ -804,7 +704,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Check the function's context matches the context argument.
__ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, kScratchReg);
- __ Assert(eq, kWrongFunctionContext);
+ __ Assert(eq, AbortReason::kWrongFunctionContext);
}
__ ldr(ip, FieldMemOperand(func, JSFunction::kCodeOffset));
__ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -1681,13 +1581,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmPush:
if (instr->InputAt(0)->IsFPRegister()) {
LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
- if (op->representation() == MachineRepresentation::kFloat64) {
- __ vpush(i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
- } else {
- DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
- __ vpush(i.InputFloatRegister(0));
- frame_access_state()->IncreaseSPDelta(1);
+ switch (op->representation()) {
+ case MachineRepresentation::kFloat32:
+ __ vpush(i.InputFloatRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ break;
+ case MachineRepresentation::kFloat64:
+ __ vpush(i.InputDoubleRegister(0));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ break;
+ case MachineRepresentation::kSimd128: {
+ __ vpush(i.InputSimd128Register(0));
+ frame_access_state()->IncreaseSPDelta(kSimd128Size / kPointerSize);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
} else {
__ push(i.InputRegister(0));
@@ -1701,6 +1611,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmPeek: {
+ // The incoming value is 0-based, but we need a 1-based value.
+ int reverse_slot = i.InputInt32(0) + 1;
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ vldr(i.OutputDoubleRegister(), MemOperand(fp, offset));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ vldr(i.OutputFloatRegister(), MemOperand(fp, offset));
+ }
+ } else {
+ __ ldr(i.OutputRegister(), MemOperand(fp, offset));
+ }
+ break;
+ }
case kArmF32x4Splat: {
int src_code = i.InputFloatRegister(0).code();
__ vdup(Neon32, i.OutputSimd128Register(),
@@ -2558,47 +2486,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS8, 0);
break;
}
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(ldrb);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsh);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(ldrh);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FP(Float);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FP(Double);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(strb);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(strh);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(str);
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FP(Float);
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FP(Double);
- break;
- case kCheckedLoadWord64:
- case kCheckedStoreWord64:
- UNREACHABLE(); // currently unsupported checked int64 load/store.
- break;
-
case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsb);
break;
@@ -2642,25 +2529,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrex, strex);
break;
case kAtomicCompareExchangeInt8:
- __ uxtb(i.InputRegister(2), i.InputRegister(2));
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb);
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
+ __ uxtb(i.TempRegister(2), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
+ i.TempRegister(2));
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicCompareExchangeUint8:
- __ uxtb(i.InputRegister(2), i.InputRegister(2));
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb);
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
+ __ uxtb(i.TempRegister(2), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
+ i.TempRegister(2));
break;
case kAtomicCompareExchangeInt16:
- __ uxth(i.InputRegister(2), i.InputRegister(2));
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh);
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
+ __ uxth(i.TempRegister(2), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
+ i.TempRegister(2));
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicCompareExchangeUint16:
- __ uxth(i.InputRegister(2), i.InputRegister(2));
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh);
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
+ __ uxth(i.TempRegister(2), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
+ i.TempRegister(2));
break;
case kAtomicCompareExchangeWord32:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex);
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex,
+ i.InputRegister(2));
break;
#define ATOMIC_BINOP_CASE(op, inst) \
case kAtomic##op##Int8: \
@@ -2686,10 +2583,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, orr)
ATOMIC_BINOP_CASE(Xor, eor)
#undef ATOMIC_BINOP_CASE
-#undef ASSEMBLE_CHECKED_LOAD_FP
-#undef ASSEMBLE_CHECKED_LOAD_INTEGER
-#undef ASSEMBLE_CHECKED_STORE_FP
-#undef ASSEMBLE_CHECKED_STORE_INTEGER
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
#undef ASSEMBLE_ATOMIC_STORE_INTEGER
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
@@ -2774,7 +2667,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
}
}
@@ -2878,7 +2771,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
@@ -2929,15 +2822,16 @@ void CodeGenerator::AssembleConstructFrame() {
RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromThrow));
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
}
__ bind(&done);
}
}
- // Skip callee-saved slots, which are pushed below.
+ // Skip callee-saved and return slots, which are pushed below.
shrink_slots -= base::bits::CountPopulation(saves);
+ shrink_slots -= frame()->GetReturnSlotCount();
shrink_slots -= 2 * base::bits::CountPopulation(saves_fp);
if (shrink_slots > 0) {
__ sub(sp, sp, Operand(shrink_slots * kPointerSize));
@@ -2953,16 +2847,29 @@ void CodeGenerator::AssembleConstructFrame() {
__ vstm(db_w, sp, DwVfpRegister::from_code(first),
DwVfpRegister::from_code(last));
}
+
if (saves != 0) {
// Save callee-saved registers.
__ stm(db_w, sp, saves);
}
+
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ // Create space for returns.
+ __ sub(sp, sp, Operand(returns * kPointerSize));
+ }
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ // Free space of returns.
+ __ add(sp, sp, Operand(returns * kPointerSize));
+ }
+
// Restore registers.
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
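
The reworked ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER macro earlier in this file keeps the classic ldrex/strex retry loop but now forms the address in a temp register instead of clobbering an input register. Its observable behaviour is that of an ordinary compare-exchange; a semantics-only sketch (what the emitted loop achieves, not the emitted code):

#include <atomic>
#include <cstdint>

// Returns the value found at |addr|; |desired| was stored iff the returned
// value equals |expected| -- mirroring the exit conditions of the
// ldrex / teq / strex loop emitted by the macro.
uint32_t CompareExchangeSketch(std::atomic<uint32_t>* addr, uint32_t expected,
                               uint32_t desired) {
  uint32_t old = expected;
  addr->compare_exchange_strong(old, desired, std::memory_order_seq_cst);
  return old;
}
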
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index c839d25cab..a7cf80450a 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -124,6 +124,7 @@ namespace compiler {
V(ArmStr) \
V(ArmPush) \
V(ArmPoke) \
+ V(ArmPeek) \
V(ArmF32x4Splat) \
V(ArmF32x4ExtractLane) \
V(ArmF32x4ReplaceLane) \
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
index 0092a9dbe5..a592515179 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -262,6 +262,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmLdrh:
case kArmLdrsh:
case kArmLdr:
+ case kArmPeek:
return kIsLoadOperation;
case kArmVstrF32:
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 4ded82fa5b..f94d114d07 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -300,7 +300,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -721,93 +722,6 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
}
}
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- ArmOperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.UseRegister(offset);
- InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
- ? g.UseImmediate(length)
- : g.UseRegister(length);
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
- g.DefineAsRegister(node), offset_operand, length_operand,
- g.UseRegister(buffer), offset_operand);
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- ArmOperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.UseRegister(offset);
- InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
- ? g.UseImmediate(length)
- : g.UseRegister(length);
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
- offset_operand, length_operand, g.UseRegister(value),
- g.UseRegister(buffer), offset_operand);
-}
-
-
namespace {
void EmitBic(InstructionSelector* selector, Node* node, Node* left,
@@ -868,14 +782,14 @@ void InstructionSelector::VisitWord32And(Node* node) {
uint32_t const shift = mshr.right().Value();
if (((shift == 8) || (shift == 16) || (shift == 24)) &&
- (value == 0xff)) {
+ (value == 0xFF)) {
// Merge SHR into AND by emitting a UXTB instruction with a
// bytewise rotation.
Emit(kArmUxtb, g.DefineAsRegister(m.node()),
g.UseRegister(mshr.left().node()),
g.TempImmediate(mshr.right().Value()));
return;
- } else if (((shift == 8) || (shift == 16)) && (value == 0xffff)) {
+ } else if (((shift == 8) || (shift == 16)) && (value == 0xFFFF)) {
// Merge SHR into AND by emitting a UXTH instruction with a
// bytewise rotation.
Emit(kArmUxth, g.DefineAsRegister(m.node()),
@@ -897,9 +811,9 @@ void InstructionSelector::VisitWord32And(Node* node) {
}
}
}
- } else if (value == 0xffff) {
+ } else if (value == 0xFFFF) {
// Emit UXTH for this AND. We don't bother testing for UXTB, as it's no
- // better than AND 0xff for this operation.
+ // better than AND 0xFF for this operation.
Emit(kArmUxth, g.DefineAsRegister(m.node()),
g.UseRegister(m.left().node()), g.TempImmediate(0));
return;
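
The UXTB/UXTH merging above relies on a small bit-level identity: rotating a 32-bit word right by 8, 16 or 24 and then keeping only the low byte gives the same result as shifting right and masking with 0xFF (and likewise for the low half-word and 0xFFFF). A standalone sketch of that identity, independent of V8's matchers:

// Checks the (x >> s) & 0xFF == low byte of ROR(x, s) identity that lets the
// selector fold the shift into a UXTB/UXTH with a byte rotation.
#include <cassert>
#include <cstdint>
#include <initializer_list>

static uint32_t RotateRight32(uint32_t x, unsigned s) {
  return (x >> s) | (x << (32 - s));  // s is 8, 16 or 24 here, never 0
}

int main() {
  const uint32_t x = 0xA1B2C3D4u;
  for (unsigned s : {8u, 16u, 24u}) {
    assert(((x >> s) & 0xFF) == (RotateRight32(x, s) & 0xFF));      // UXTB case
  }
  for (unsigned s : {8u, 16u}) {
    assert(((x >> s) & 0xFFFF) == (RotateRight32(x, s) & 0xFFFF));  // UXTH case
  }
  return 0;
}
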
@@ -995,7 +909,8 @@ void VisitShift(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -1206,6 +1121,7 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitInt32Add(Node* node) {
ArmOperandGenerator g(this);
@@ -1230,12 +1146,12 @@ void InstructionSelector::VisitInt32Add(Node* node) {
}
case IrOpcode::kWord32And: {
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().Is(0xff)) {
+ if (mleft.right().Is(0xFF)) {
Emit(kArmUxtab, g.DefineAsRegister(node),
g.UseRegister(m.right().node()),
g.UseRegister(mleft.left().node()), g.TempImmediate(0));
return;
- } else if (mleft.right().Is(0xffff)) {
+ } else if (mleft.right().Is(0xFFFF)) {
Emit(kArmUxtah, g.DefineAsRegister(node),
g.UseRegister(m.right().node()),
g.UseRegister(mleft.left().node()), g.TempImmediate(0));
@@ -1284,12 +1200,12 @@ void InstructionSelector::VisitInt32Add(Node* node) {
}
case IrOpcode::kWord32And: {
Int32BinopMatcher mright(m.right().node());
- if (mright.right().Is(0xff)) {
+ if (mright.right().Is(0xFF)) {
Emit(kArmUxtab, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()), g.TempImmediate(0));
return;
- } else if (mright.right().Is(0xffff)) {
+ } else if (mright.right().Is(0xFFFF)) {
Emit(kArmUxtah, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()), g.TempImmediate(0));
@@ -1358,7 +1274,8 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
} else if (cont->IsDeoptimize()) {
InstructionOperand in[] = {temp_operand, result_operand, shift_31};
selector->EmitDeoptimize(opcode, 0, nullptr, 3, in, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), temp_operand,
result_operand, shift_31);
@@ -1596,22 +1513,44 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
- if (input.node()) {
+ if (input.node) {
int slot = static_cast<int>(n);
Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(),
- g.UseRegister(input.node()));
+ g.UseRegister(input.node));
}
}
} else {
// Push any stack arguments.
for (PushParameter input : base::Reversed(*arguments)) {
// Skip any alignment holes in pushed nodes.
- if (input.node() == nullptr) continue;
- Emit(kArmPush, g.NoOutput(), g.UseRegister(input.node()));
+ if (input.node == nullptr) continue;
+ Emit(kArmPush, g.NoOutput(), g.UseRegister(input.node));
}
}
}
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ ArmOperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ // Skip any alignment holes in nodes.
+ if (output.node != nullptr) {
+ DCHECK(!descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ Emit(kArmPeek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
+ }
+ reverse_slot += output.location.GetSizeInPointers();
+ }
+}
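
Results that come back on the caller's frame are read with kArmPeek; reverse_slot is just a running count of slots from the start of the return area, advanced by each result's size in pointers so that multi-slot returns keep later indices correct. A minimal stand-in sketch of that bookkeeping (FakeResult is a hypothetical substitute for PushParameter and LinkageLocation, not a V8 type):

// Stand-in sketch of the reverse-slot bookkeeping above.
#include <cstdio>
#include <vector>

struct FakeResult {
  bool on_caller_frame;  // stands in for location.IsCallerFrameSlot()
  bool has_node;         // stands in for output.node != nullptr
  int size_in_pointers;  // stands in for location.GetSizeInPointers()
};

int main() {
  // Two stack-returned values (one double-width), one register-returned value.
  std::vector<FakeResult> results = {{true, true, 1}, {true, true, 2}, {false, true, 1}};
  int reverse_slot = 0;
  for (const FakeResult& r : results) {
    if (!r.on_caller_frame) continue;     // register results need no Peek
    if (r.has_node) {
      std::printf("emit Peek at reverse slot %d\n", reverse_slot);
    }
    reverse_slot += r.size_in_pointers;   // holes still advance the counter
  }
  return 0;
}
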
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
@@ -1630,7 +1569,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1825,7 +1765,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -1984,7 +1925,8 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
value_operand);
@@ -2006,14 +1948,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2276,15 +2218,14 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
AddressingMode addressing_mode = kMode_Offset_RR;
InstructionOperand inputs[3];
size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseRegister(base);
inputs[input_count++] = g.UseRegister(index);
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionOperand temp[1];
- temp[0] = g.TempRegister();
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, 1, temp);
+ Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
@@ -2313,16 +2254,16 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
AddressingMode addressing_mode = kMode_Offset_RR;
InstructionOperand inputs[4];
size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseRegister(base);
inputs[input_count++] = g.UseRegister(index);
inputs[input_count++] = g.UseUniqueRegister(old_value);
inputs[input_count++] = g.UseUniqueRegister(new_value);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionOperand temp[1];
- temp[0] = g.TempRegister();
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
+ g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, 1, temp);
+ Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
void InstructionSelector::VisitAtomicBinaryOperation(
@@ -2352,17 +2293,15 @@ void InstructionSelector::VisitAtomicBinaryOperation(
AddressingMode addressing_mode = kMode_Offset_RR;
InstructionOperand inputs[3];
size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseRegister(base);
inputs[input_count++] = g.UseRegister(index);
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionOperand temps[2];
- size_t temp_count = 0;
- temps[temp_count++] = g.TempRegister();
- temps[temp_count++] = g.TempRegister();
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
+ g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, temp_count, temps);
+ Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
#define VISIT_ATOMIC_BINOP(op) \
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 3673ee2426..147d85a171 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -264,46 +264,6 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
namespace {
-class OutOfLineLoadNaN32 final : public OutOfLineCode {
- public:
- OutOfLineLoadNaN32(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ Fmov(result_, std::numeric_limits<float>::quiet_NaN());
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineLoadNaN64 final : public OutOfLineCode {
- public:
- OutOfLineLoadNaN64(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ Fmov(result_, std::numeric_limits<double>::quiet_NaN());
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineLoadZero final : public OutOfLineCode {
- public:
- OutOfLineLoadZero(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ Mov(result_, 0); }
-
- private:
- Register const result_;
-};
-
-
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand index,
@@ -336,14 +296,14 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
- __ Push(lr);
+ __ Push(lr, padreg);
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(),
__ StackPointer());
}
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
if (must_save_lr_) {
- __ Pop(lr);
+ __ Pop(padreg, lr);
unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
}
}
@@ -416,90 +376,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} // namespace
-#define ASSEMBLE_BOUNDS_CHECK(offset, length, out_of_bounds) \
- do { \
- if (length.IsImmediate() && \
- base::bits::IsPowerOfTwo(length.ImmediateValue())) { \
- __ Tst(offset, ~(length.ImmediateValue() - 1)); \
- __ B(ne, out_of_bounds); \
- } else { \
- __ Cmp(offset, length); \
- __ B(hs, out_of_bounds); \
- } \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(width) \
- do { \
- auto result = i.OutputFloat##width##Register(); \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto ool = new (zone()) OutOfLineLoadNaN##width(this, result); \
- ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry()); \
- __ Ldr(result, MemOperand(buffer, offset, UXTW)); \
- __ Bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister32(); \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto ool = new (zone()) OutOfLineLoadZero(this, result); \
- ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry()); \
- __ asm_instr(result, MemOperand(buffer, offset, UXTW)); \
- __ Bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER_64(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto ool = new (zone()) OutOfLineLoadZero(this, result); \
- ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry()); \
- __ asm_instr(result, MemOperand(buffer, offset, UXTW)); \
- __ Bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(width) \
- do { \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto value = i.InputFloat##width##OrZeroRegister(3); \
- Label done; \
- ASSEMBLE_BOUNDS_CHECK(offset, length, &done); \
- __ Str(value, MemOperand(buffer, offset, UXTW)); \
- __ Bind(&done); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto value = i.InputOrZeroRegister32(3); \
- Label done; \
- ASSEMBLE_BOUNDS_CHECK(offset, length, &done); \
- __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
- __ Bind(&done); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER_64(asm_instr) \
- do { \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto value = i.InputOrZeroRegister64(3); \
- Label done; \
- ASSEMBLE_BOUNDS_CHECK(offset, length, &done); \
- __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
- __ Bind(&done); \
- } while (0)
-
#define ASSEMBLE_SHIFT(asm_instr, width) \
do { \
if (instr->InputAt(1)->IsRegister()) { \
@@ -579,12 +455,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
void CodeGenerator::AssembleDeconstructFrame() {
- const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->IsCFunctionCall() || descriptor->UseNativeStack()) {
- __ Mov(csp, fp);
- } else {
- __ Mov(jssp, fp);
- }
+ __ Mov(csp, fp);
__ Pop(fp, lr);
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
@@ -633,6 +504,7 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
int current_sp_offset = state->GetSPToFPSlotCount() +
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ DCHECK_EQ(stack_slot_delta % 2, 0);
if (stack_slot_delta > 0) {
tasm->Claim(stack_slot_delta);
state->IncreaseSPDelta(stack_slot_delta);
@@ -652,31 +524,48 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
+ DCHECK_EQ(first_unused_stack_slot % 2, 0);
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
+ DCHECK(instr->IsTailCall());
+ InstructionOperandConverter g(this, instr);
+ int optional_padding_slot = g.InputInt32(instr->InputCount() - 2);
+ if (optional_padding_slot % 2) {
+ __ Poke(padreg, optional_padding_slot * kPointerSize);
+ }
}
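
The two new DCHECKs and the padreg poke both serve arm64's 16-byte stack alignment: slot deltas must stay even, and when the instruction passes an odd optional padding slot, the one-slot hole is filled with padreg so it never holds an unspecified value. A sketch of just that parity decision (kPointerSize hard-coded to 8 for illustration, the Poke replaced by a print):

// Sketch of the padding decision above: slots are 8 bytes and the stack
// pointer must stay 16-byte aligned, so an odd padding-slot index means
// there is a one-slot hole to fill.
#include <cstdio>

constexpr int kPointerSize = 8;  // arm64 slot size

void MaybePokePadding(int optional_padding_slot) {
  if (optional_padding_slot % 2) {
    std::printf("poke padreg at byte offset %d\n",
                optional_padding_slot * kPointerSize);
  } else {
    std::printf("no padding needed\n");
  }
}

int main() {
  MaybePokePadding(3);  // odd slot index -> write padding at offset 24
  MaybePokePadding(4);  // even -> already aligned, nothing to do
  return 0;
}
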
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. load the address of the current instruction;
+// 1. compute the offset of the {CodeDataContainer} from our current location
+// and load it.
// 2. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
// 3. test kMarkedForDeoptimizationBit in those flags; and
// 4. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- Label current;
- // The Adr instruction gets the address of the current instruction.
- __ Adr(x2, &current);
- __ Bind(&current);
- int pc = __ pc_offset();
- int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
- __ Ldr(x2, MemOperand(x2, offset));
- __ Ldr(x2, FieldMemOperand(x2, CodeDataContainer::kKindSpecificFlagsOffset));
- __ Tst(x2, Immediate(1 << Code::kMarkedForDeoptimizationBit));
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.AcquireX();
+ {
+ // Since we always emit a bailout check at the very beginning we can be
+ // certain that the distance between here and the {CodeDataContainer} is
+ // fixed and always in range of a load.
+ int data_container_offset =
+ (Code::kCodeDataContainerOffset - Code::kHeaderSize) - __ pc_offset();
+ DCHECK_GE(0, data_container_offset);
+ DCHECK_EQ(0, data_container_offset % 4);
+ InstructionAccurateScope scope(tasm());
+ __ ldr_pcrel(scratch, data_container_offset >> 2);
+ }
+ __ Ldr(scratch,
+ FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+ Label not_deoptimized;
+ __ Tbz(scratch, Code::kMarkedForDeoptimizationBit, &not_deoptimized);
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
- __ Jump(code, RelocInfo::CODE_TARGET, ne);
+ __ Jump(code, RelocInfo::CODE_TARGET);
+ __ Bind(&not_deoptimized);
}
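
The replacement load is pc-relative and points backwards into the Code object's header: pc_offset() is how many bytes have already been emitted past the code start, which itself lies Code::kHeaderSize past the object address, so (kCodeDataContainerOffset - kHeaderSize) - pc_offset() is a non-positive, word-aligned byte offset that ldr_pcrel then consumes in units of four bytes. A sketch of that arithmetic with illustrative constants (the values are made up, not V8's real object layout):

// Sketch of the offset arithmetic above; the point is only that the result is
// a negative, word-aligned byte offset handed to ldr_pcrel as a word count.
#include <cassert>
#include <cstdio>

int main() {
  const int kHeaderSize = 64;               // illustrative, not the real layout
  const int kCodeDataContainerOffset = 24;  // illustrative field offset
  const int pc_offset = 0;                  // the check is emitted at code start
  int data_container_offset =
      (kCodeDataContainerOffset - kHeaderSize) - pc_offset;
  assert(data_container_offset <= 0);       // always behind the current pc
  assert(data_container_offset % 4 == 0);   // ldr (literal) needs word alignment
  // The generated code shifts right by two; division by four is equivalent
  // for these word-aligned offsets.
  std::printf("ldr_pcrel word offset: %d\n", data_container_offset / 4);
  return 0;
}
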
// Assembles an instruction after register allocation, producing machine code.
@@ -700,18 +589,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Call(target);
}
RecordCallPosition(instr);
- // TODO(titzer): this is ugly. JSSP should be a caller-save register
- // in this case, but it is not possible to express in the register
- // allocator.
- CallDescriptor::Flags flags(MiscField::decode(opcode));
- if (flags & CallDescriptor::kRestoreJSSP) {
- __ Ldr(jssp, MemOperand(csp));
- __ Mov(csp, jssp);
- }
- if (flags & CallDescriptor::kRestoreCSP) {
- __ Mov(csp, jssp);
- __ AssertCspAligned();
- }
frame_access_state()->ClearSPDelta();
break;
}
@@ -734,18 +611,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Call(target);
}
RecordCallPosition(instr);
- // TODO(titzer): this is ugly. JSSP should be a caller-save register
- // in this case, but it is not possible to express in the register
- // allocator.
- CallDescriptor::Flags flags(MiscField::decode(opcode));
- if (flags & CallDescriptor::kRestoreJSSP) {
- __ Ldr(jssp, MemOperand(csp));
- __ Mov(csp, jssp);
- }
- if (flags & CallDescriptor::kRestoreCSP) {
- __ Mov(csp, jssp);
- __ AssertCspAligned();
- }
frame_access_state()->ClearSPDelta();
break;
}
@@ -813,24 +678,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register temp = scope.AcquireX();
__ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, temp);
- __ Assert(eq, kWrongFunctionContext);
+ __ Assert(eq, AbortReason::kWrongFunctionContext);
}
__ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeOffset));
__ Add(x10, x10, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(x10);
RecordCallPosition(instr);
- // TODO(titzer): this is ugly. JSSP should be a caller-save register
- // in this case, but it is not possible to express in the register
- // allocator.
- CallDescriptor::Flags flags(MiscField::decode(opcode));
- if (flags & CallDescriptor::kRestoreJSSP) {
- __ Ldr(jssp, MemOperand(csp));
- __ Mov(csp, jssp);
- }
- if (flags & CallDescriptor::kRestoreCSP) {
- __ Mov(csp, jssp);
- __ AssertCspAligned();
- }
frame_access_state()->ClearSPDelta();
break;
}
@@ -1339,75 +1192,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64CompareAndBranch:
// Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
break;
- case kArm64ClaimCSP: {
- int count = RoundUp(i.InputInt32(0), 2);
- Register prev = __ StackPointer();
- if (prev.Is(jssp)) {
- // TODO(titzer): make this a macro-assembler method.
- // Align the CSP and store the previous JSSP on the stack. We do not
- // need to modify the SP delta here, as we will continue to access the
- // frame via JSSP.
- UseScratchRegisterScope scope(tasm());
- Register tmp = scope.AcquireX();
-
- // TODO(arm64): Storing JSSP on the stack is redundant when calling a C
- // function, as JSSP is callee-saved (we still need to do this when
- // calling a code object that uses the CSP as the stack pointer). See
- // the code generation for kArchCallCodeObject vs. kArchCallCFunction
- // (the latter does not restore CSP/JSSP).
- // TurboAssembler::CallCFunction() (safely) drops this extra slot
- // anyway.
- int sp_alignment = __ ActivationFrameAlignment();
- __ Sub(tmp, jssp, kPointerSize);
- __ Bic(csp, tmp, sp_alignment - 1);
- __ Str(jssp, MemOperand(csp));
- if (count > 0) {
- __ SetStackPointer(csp);
- __ Claim(count);
- __ SetStackPointer(prev);
- }
- } else {
- __ AssertCspAligned();
- if (count > 0) {
- __ Claim(count);
- frame_access_state()->IncreaseSPDelta(count);
- }
- }
- break;
- }
- case kArm64ClaimJSSP: {
+ case kArm64Claim: {
int count = i.InputInt32(0);
- if (csp.Is(__ StackPointer())) {
- // No JSSP is set up. Compute it from the CSP.
- __ AssertCspAligned();
- if (count > 0) {
- int even = RoundUp(count, 2);
- __ Sub(jssp, csp, count * kPointerSize);
- // We must also update CSP to maintain stack consistency:
- __ Sub(csp, csp, even * kPointerSize); // Must always be aligned.
- __ AssertStackConsistency();
- frame_access_state()->IncreaseSPDelta(even);
- } else {
- __ Mov(jssp, csp);
- }
- } else {
- // JSSP is the current stack pointer, just use regular Claim().
+ DCHECK_EQ(count % 2, 0);
+ __ AssertCspAligned();
+ if (count > 0) {
__ Claim(count);
frame_access_state()->IncreaseSPDelta(count);
}
break;
}
- case kArm64PokeCSP: // fall through
- case kArm64PokeJSSP: {
- Register prev = __ StackPointer();
- __ SetStackPointer(arch_opcode == kArm64PokeCSP ? csp : jssp);
+ case kArm64Poke: {
Operand operand(i.InputInt32(1) * kPointerSize);
- if (instr->InputAt(0)->IsFPRegister()) {
+ if (instr->InputAt(0)->IsSimd128Register()) {
+ __ Poke(i.InputSimd128Register(0), operand);
+ } else if (instr->InputAt(0)->IsFPRegister()) {
__ Poke(i.InputFloat64Register(0), operand);
} else {
- __ Poke(i.InputRegister(0), operand);
+ __ Poke(i.InputOrZeroRegister64(0), operand);
}
- __ SetStackPointer(prev);
break;
}
case kArm64PokePair: {
@@ -1421,6 +1224,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kArm64Peek: {
+ int reverse_slot = i.InputInt32(0);
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ Ldr(i.OutputDoubleRegister(), MemOperand(fp, offset));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ Ldr(i.OutputFloatRegister(), MemOperand(fp, offset));
+ }
+ } else {
+ __ Ldr(i.OutputRegister(), MemOperand(fp, offset));
+ }
+ break;
+ }
case kArm64Clz:
__ Clz(i.OutputRegister64(), i.InputRegister64(0));
break;
@@ -1652,28 +1472,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Fmov(i.OutputRegister32(), i.InputFloat32Register(0));
break;
case kArm64Float64ExtractHighWord32:
- // TODO(arm64): This should use MOV (to general) when NEON is supported.
- __ Fmov(i.OutputRegister(), i.InputFloat64Register(0));
- __ Lsr(i.OutputRegister(), i.OutputRegister(), 32);
+ __ Umov(i.OutputRegister32(), i.InputFloat64Register(0).V2S(), 1);
break;
- case kArm64Float64InsertLowWord32: {
- // TODO(arm64): This should use MOV (from general) when NEON is supported.
- UseScratchRegisterScope scope(tasm());
- Register tmp = scope.AcquireX();
- __ Fmov(tmp, i.InputFloat64Register(0));
- __ Bfi(tmp, i.InputRegister(1), 0, 32);
- __ Fmov(i.OutputFloat64Register(), tmp);
+ case kArm64Float64InsertLowWord32:
+ DCHECK(i.OutputFloat64Register().Is(i.InputFloat64Register(0)));
+ __ Ins(i.OutputFloat64Register().V2S(), 0, i.InputRegister32(1));
break;
- }
- case kArm64Float64InsertHighWord32: {
- // TODO(arm64): This should use MOV (from general) when NEON is supported.
- UseScratchRegisterScope scope(tasm());
- Register tmp = scope.AcquireX();
- __ Fmov(tmp.W(), i.InputFloat32Register(0));
- __ Bfi(tmp, i.InputRegister(1), 32, 32);
- __ Fmov(i.OutputFloat64Register(), tmp);
+ case kArm64Float64InsertHighWord32:
+ DCHECK(i.OutputFloat64Register().Is(i.InputFloat64Register(0)));
+ __ Ins(i.OutputFloat64Register().V2S(), 1, i.InputRegister32(1));
break;
- }
case kArm64Float64MoveU64:
__ Fmov(i.OutputFloat64Register(), i.InputRegister(0));
break;
@@ -1734,48 +1542,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64StrQ:
__ Str(i.InputSimd128Register(0), i.MemoryOperand(1));
break;
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrb);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsh);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrh);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Ldr);
- break;
- case kCheckedLoadWord64:
- ASSEMBLE_CHECKED_LOAD_INTEGER_64(Ldr);
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(32);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(64);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(Strb);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(Strh);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(Str);
- break;
- case kCheckedStoreWord64:
- ASSEMBLE_CHECKED_STORE_INTEGER_64(Str);
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(32);
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(64);
- break;
case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
@@ -1860,13 +1626,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, Orr)
ATOMIC_BINOP_CASE(Xor, Eor)
#undef ATOMIC_BINOP_CASE
-#undef ASSEMBLE_BOUNDS_CHECK
-#undef ASSEMBLE_CHECKED_LOAD_FLOAT
-#undef ASSEMBLE_CHECKED_LOAD_INTEGER
-#undef ASSEMBLE_CHECKED_LOAD_INTEGER_64
-#undef ASSEMBLE_CHECKED_STORE_FLOAT
-#undef ASSEMBLE_CHECKED_STORE_INTEGER
-#undef ASSEMBLE_CHECKED_STORE_INTEGER_64
#undef ASSEMBLE_SHIFT
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
#undef ASSEMBLE_ATOMIC_STORE_INTEGER
@@ -2437,8 +2196,6 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Ret();
} else {
DCHECK(csp.Is(__ StackPointer()));
- // Initialize the jssp because it is required for the runtime call.
- __ Mov(jssp, csp);
gen_->AssembleSourcePosition(instr_);
__ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
@@ -2512,12 +2269,6 @@ void CodeGenerator::FinishFrame(Frame* frame) {
frame->AlignFrame(16);
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->UseNativeStack() || descriptor->IsCFunctionCall()) {
- __ SetStackPointer(csp);
- } else {
- __ SetStackPointer(jssp);
- }
-
// Save FP registers.
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
descriptor->CalleeSavedFPRegisters());
@@ -2540,10 +2291,10 @@ void CodeGenerator::FinishFrame(Frame* frame) {
void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->UseNativeStack()) {
- __ AssertCspAligned();
- }
+ __ AssertCspAligned();
+ // The frame has been previously padded in CodeGenerator::FinishFrame().
+ DCHECK_EQ(frame()->GetTotalFrameSlotCount() % 2, 0);
int shrink_slots =
frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
@@ -2551,11 +2302,13 @@ void CodeGenerator::AssembleConstructFrame() {
descriptor->CalleeSavedRegisters());
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
descriptor->CalleeSavedFPRegisters());
+ // The number of slots for returns has to be even to ensure the correct stack
+ // alignment.
+ const int returns = RoundUp(frame()->GetReturnSlotCount(), 2);
if (frame_access_state()->has_frame()) {
// Link the frame
if (descriptor->IsJSFunctionCall()) {
- DCHECK(!descriptor->UseNativeStack());
__ Prologue();
} else {
__ Push(lr, fp);
@@ -2566,7 +2319,7 @@ void CodeGenerator::AssembleConstructFrame() {
// Create OSR entry if applicable
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the
// unoptimized frame is still on the stack. Optimized code uses OSR values
@@ -2604,10 +2357,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ EnterFrame(StackFrame::WASM_COMPILED);
}
DCHECK(__ StackPointer().Is(csp));
- __ SetStackPointer(jssp);
__ AssertStackConsistency();
- // Initialize the jssp because it is required for the runtime call.
- __ Mov(jssp, csp);
__ Mov(cp, Smi::kZero);
__ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
// We come from WebAssembly, there are no references for the GC.
@@ -2617,7 +2367,6 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_debug_code) {
__ Brk(0);
}
- __ SetStackPointer(csp);
__ AssertStackConsistency();
__ Bind(&done);
}
@@ -2625,6 +2374,7 @@ void CodeGenerator::AssembleConstructFrame() {
// Skip callee-saved slots, which are pushed below.
shrink_slots -= saves.Count();
shrink_slots -= saves_fp.Count();
+ shrink_slots -= returns;
// Build remainder of frame, including accounting for and filling-in
// frame-specific header information, i.e. claiming the extra slot that
@@ -2667,11 +2417,21 @@ void CodeGenerator::AssembleConstructFrame() {
// CPURegList::GetCalleeSaved(): x30 is missing.
// DCHECK(saves.list() == CPURegList::GetCalleeSaved().list());
__ PushCPURegList(saves);
+
+ if (returns != 0) {
+ __ Claim(returns);
+ }
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ const int returns = RoundUp(frame()->GetReturnSlotCount(), 2);
+
+ if (returns != 0) {
+ __ Drop(returns);
+ }
+
// Restore registers.
CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
descriptor->CalleeSavedRegisters());
@@ -2698,33 +2458,22 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
} else {
__ Bind(&return_label_);
AssembleDeconstructFrame();
- if (descriptor->UseNativeStack()) {
- pop_count += (pop_count & 1); // align
- }
}
} else {
AssembleDeconstructFrame();
- if (descriptor->UseNativeStack()) {
- pop_count += (pop_count & 1); // align
- }
}
- } else if (descriptor->UseNativeStack()) {
- pop_count += (pop_count & 1); // align
}
if (pop->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
pop_count += g.ToConstant(pop).ToInt32();
- __ Drop(pop_count);
+ __ DropArguments(pop_count);
} else {
Register pop_reg = g.ToRegister(pop);
__ Add(pop_reg, pop_reg, pop_count);
- __ Drop(pop_reg);
+ __ DropArguments(pop_reg);
}
- if (descriptor->UseNativeStack()) {
- __ AssertCspAligned();
- }
+ __ AssertCspAligned();
__ Ret();
}
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
index 6354dfc4db..820b55a99d 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -79,11 +79,10 @@ namespace compiler {
V(Arm64TestAndBranch) \
V(Arm64CompareAndBranch32) \
V(Arm64CompareAndBranch) \
- V(Arm64ClaimCSP) \
- V(Arm64ClaimJSSP) \
- V(Arm64PokeCSP) \
- V(Arm64PokeJSSP) \
+ V(Arm64Claim) \
+ V(Arm64Poke) \
V(Arm64PokePair) \
+ V(Arm64Peek) \
V(Arm64Float32Cmp) \
V(Arm64Float32Add) \
V(Arm64Float32Sub) \
@@ -326,8 +325,6 @@ namespace compiler {
V(Operand2_R_SXTH) /* %r0 SXTH (signed extend halfword) */ \
V(Operand2_R_SXTW) /* %r0 SXTW (signed extend word) */
-enum ResetJSSPAfterCall { kNoResetJSSP, kResetJSSP };
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
index 0294c828da..c2b0a4e386 100644
--- a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -128,6 +128,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Float64ExtractHighWord32:
case kArm64Float64InsertLowWord32:
case kArm64Float64InsertHighWord32:
+ case kArm64Float64Mod:
case kArm64Float64MoveU64:
case kArm64U64MoveFloat64:
case kArm64Float64SilenceNaN:
@@ -292,14 +293,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Ldrsw:
case kArm64LdrW:
case kArm64Ldr:
+ case kArm64Peek:
return kIsLoadOperation;
- case kArm64Float64Mod: // This opcode will call a C Function which can
- // alter CSP. TODO(arm64): Remove once JSSP is gone.
- case kArm64ClaimCSP:
- case kArm64ClaimJSSP:
- case kArm64PokeCSP:
- case kArm64PokeJSSP:
+ case kArm64Claim:
+ case kArm64Poke:
case kArm64PokePair:
case kArm64StrS:
case kArm64StrD:
@@ -387,16 +385,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kArm64Ldrsw:
return 11;
- case kCheckedLoadInt8:
- case kCheckedLoadUint8:
- case kCheckedLoadInt16:
- case kCheckedLoadUint16:
- case kCheckedLoadWord32:
- case kCheckedLoadWord64:
- case kCheckedLoadFloat32:
- case kCheckedLoadFloat64:
- return 5;
-
case kArm64Str:
case kArm64StrD:
case kArm64StrS:
@@ -405,14 +393,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kArm64Strh:
return 1;
- case kCheckedStoreWord8:
- case kCheckedStoreWord16:
- case kCheckedStoreWord32:
- case kCheckedStoreWord64:
- case kCheckedStoreFloat32:
- case kCheckedStoreFloat64:
- return 1;
-
case kArm64Madd32:
case kArm64Mneg32:
case kArm64Msub32:
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 201c0613c4..d6082c9f0a 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -43,7 +43,7 @@ class Arm64OperandGenerator final : public OperandGenerator {
InstructionOperand UseRegisterOrImmediateZero(Node* node) {
if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
(IsFloatConstant(node) &&
- (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
+ (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
return UseImmediate(node);
}
return UseRegister(node);
@@ -295,12 +295,12 @@ bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
if (nm.IsWord32And()) {
Int32BinopMatcher mright(right_node);
- if (mright.right().Is(0xff) || mright.right().Is(0xffff)) {
+ if (mright.right().Is(0xFF) || mright.right().Is(0xFFFF)) {
int32_t mask = mright.right().Value();
*left_op = g->UseRegister(left_node);
*right_op = g->UseRegister(mright.left().node());
*opcode |= AddressingModeField::encode(
- (mask == 0xff) ? kMode_Operand2_R_UXTB : kMode_Operand2_R_UXTH);
+ (mask == 0xFF) ? kMode_Operand2_R_UXTB : kMode_Operand2_R_UXTH);
return true;
}
} else if (nm.IsWord32Sar()) {
@@ -488,7 +488,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -760,110 +761,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- Arm64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedLoadWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- // If the length is a constant power of two, allow the code generator to
- // pick a more efficient bounds check sequence by passing the length as an
- // immediate.
- if (length->opcode() == IrOpcode::kInt32Constant) {
- Int32Matcher m(length);
- if (m.IsPowerOf2()) {
- Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
- g.UseRegister(offset), g.UseImmediate(length));
- return;
- }
- }
- Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
- g.UseRegister(offset), g.UseOperand(length, kArithmeticImm));
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- Arm64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedStoreWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- // If the length is a constant power of two, allow the code generator to
- // pick a more efficient bounds check sequence by passing the length as an
- // immediate.
- if (length->opcode() == IrOpcode::kInt32Constant) {
- Int32Matcher m(length);
- if (m.IsPowerOf2()) {
- Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
- g.UseImmediate(length), g.UseRegisterOrImmediateZero(value));
- return;
- }
- }
- Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
- g.UseOperand(length, kArithmeticImm),
- g.UseRegisterOrImmediateZero(value));
-}
-
-
template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
ArchOpcode opcode, bool left_can_cover,
@@ -950,7 +847,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
// Any shift value can match; int32 shifts use `value % 32`.
- uint32_t lsb = mleft.right().Value() & 0x1f;
+ uint32_t lsb = mleft.right().Value() & 0x1F;
// Ubfx cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -991,7 +888,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
// Any shift value can match; int64 shifts use `value % 64`.
- uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);
+ uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3F);
// Ubfx cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -1105,16 +1002,16 @@ bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
Arm64OperandGenerator g(selector);
Int32BinopMatcher m(node);
if (selector->CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
- // Select Ubfx or Sbfx for (x << (K & 0x1f)) OP (K & 0x1f), where
- // OP is >>> or >> and (K & 0x1f) != 0.
+ // Select Ubfx or Sbfx for (x << (K & 0x1F)) OP (K & 0x1F), where
+ // OP is >>> or >> and (K & 0x1F) != 0.
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && m.right().HasValue() &&
- (mleft.right().Value() & 0x1f) != 0 &&
- (mleft.right().Value() & 0x1f) == (m.right().Value() & 0x1f)) {
+ (mleft.right().Value() & 0x1F) != 0 &&
+ (mleft.right().Value() & 0x1F) == (m.right().Value() & 0x1F)) {
DCHECK(m.IsWord32Shr() || m.IsWord32Sar());
ArchOpcode opcode = m.IsWord32Sar() ? kArm64Sbfx32 : kArm64Ubfx32;
- int right_val = m.right().Value() & 0x1f;
+ int right_val = m.right().Value() & 0x1F;
DCHECK_NE(right_val, 0);
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -1132,7 +1029,7 @@ bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x1f;
+ uint32_t lsb = m.right().Value() & 0x1F;
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
@@ -1160,7 +1057,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
// by Uint32MulHigh.
Arm64OperandGenerator g(this);
Node* left = m.left().node();
- int shift = m.right().Value() & 0x1f;
+ int shift = m.right().Value() & 0x1F;
InstructionOperand const smull_operand = g.TempRegister();
Emit(kArm64Umull, smull_operand, g.UseRegister(left->InputAt(0)),
g.UseRegister(left->InputAt(1)));
@@ -1176,7 +1073,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
void InstructionSelector::VisitWord64Shr(Node* node) {
Int64BinopMatcher m(node);
if (m.left().IsWord64And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x3f;
+ uint32_t lsb = m.right().Value() & 0x3F;
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
@@ -1211,7 +1108,7 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
// by Int32MulHigh.
Arm64OperandGenerator g(this);
Node* left = m.left().node();
- int shift = m.right().Value() & 0x1f;
+ int shift = m.right().Value() & 0x1F;
InstructionOperand const smull_operand = g.TempRegister();
Emit(kArm64Smull, smull_operand, g.UseRegister(left->InputAt(0)),
g.UseRegister(left->InputAt(1)));
@@ -1361,6 +1258,8 @@ void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+
void InstructionSelector::VisitInt32Add(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1483,7 +1382,8 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
} else if (cont->IsDeoptimize()) {
InstructionOperand in[] = {result, result};
selector->EmitDeoptimize(opcode, 0, nullptr, 2, in, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), result, result);
} else {
@@ -1784,29 +1684,33 @@ void InstructionSelector::EmitPrepareArguments(
Node* node) {
Arm64OperandGenerator g(this);
- bool from_native_stack = linkage()->GetIncomingDescriptor()->UseNativeStack();
- bool to_native_stack = descriptor->UseNativeStack();
-
- bool always_claim = to_native_stack != from_native_stack;
-
+ // `arguments` includes alignment "holes". This means that slots bigger than
+ // kPointerSize, e.g. Simd128, will span across multiple arguments.
int claim_count = static_cast<int>(arguments->size());
int slot = claim_count - 1;
+ claim_count = RoundUp(claim_count, 2);
// Bump the stack pointer(s).
- if (claim_count > 0 || always_claim) {
+ if (claim_count > 0) {
// TODO(titzer): claim and poke probably take small immediates.
// TODO(titzer): it would be better to bump the csp here only
- // and emit paired stores with increment for non c frames.
- ArchOpcode claim = to_native_stack ? kArm64ClaimCSP : kArm64ClaimJSSP;
- // ClaimJSSP(0) or ClaimCSP(0) isn't a nop if there is a mismatch between
- // CSP and JSSP.
- Emit(claim, g.NoOutput(), g.TempImmediate(claim_count));
+ // and emit paired stores with increment for non c frames.
+ Emit(kArm64Claim, g.NoOutput(), g.TempImmediate(claim_count));
+ }
+
+ if (claim_count > 0) {
+ // Store padding, which might be overwritten.
+ Emit(kArm64Poke, g.NoOutput(), g.UseImmediate(0),
+ g.TempImmediate(claim_count - 1));
}
// Poke the arguments into the stack.
- ArchOpcode poke = to_native_stack ? kArm64PokeCSP : kArm64PokeJSSP;
while (slot >= 0) {
- Emit(poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
- g.TempImmediate(slot));
+ Node* input_node = (*arguments)[slot].node;
+ // Skip any alignment holes in pushed nodes.
+ if (input_node != nullptr) {
+ Emit(kArm64Poke, g.NoOutput(), g.UseRegister(input_node),
+ g.TempImmediate(slot));
+ }
slot--;
// TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the
// same type.
@@ -1816,6 +1720,29 @@ void InstructionSelector::EmitPrepareArguments(
}
}
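
On arm64 the outgoing argument area is claimed in whole pairs of slots so csp stays 16-byte aligned, and the topmost claimed slot is immediately poked with an immediate zero; if the argument count was already even, a real argument simply overwrites that padding later, while genuine alignment holes (nullptr entries for oversized values such as Simd128) are skipped when poking. A tiny sketch of the rounding and padding-slot arithmetic only:

// Sketch of the claim/padding arithmetic above: claim an even number of
// slots, and the topmost claimed slot (claim_count - 1) gets the padding poke.
#include <cstdio>
#include <initializer_list>

int RoundUp2(int n) { return (n + 1) & ~1; }

int main() {
  for (int arg_slots : {0, 3, 4}) {
    int claim_count = RoundUp2(arg_slots);
    std::printf("args=%d claim=%d", arg_slots, claim_count);
    if (claim_count > 0) {
      std::printf(" padding poke at slot %d", claim_count - 1);
    }
    std::printf("\n");
  }
  return 0;
}
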
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ Arm64OperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ reverse_slot += output.location.GetSizeInPointers();
+ // Skip any alignment holes in nodes.
+ if (output.node == nullptr) continue;
+ DCHECK(!descriptor->IsCFunctionCall());
+
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+
+ Emit(kArm64Peek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
+ }
+}
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
@@ -1834,7 +1761,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -2002,24 +1930,23 @@ void EmitBranchOrDeoptimize(InstructionSelector* selector,
} else {
DCHECK(cont->IsDeoptimize());
selector->EmitDeoptimize(cont->Encode(opcode), g.NoOutput(), value,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
}
}
// Try to emit TBZ, TBNZ, CBZ or CBNZ for certain comparisons of {node}
-// against zero, depending on the condition.
-bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, Node* user,
- FlagsCondition cond, FlagsContinuation* cont) {
- Int32BinopMatcher m_user(user);
- USE(m_user);
- DCHECK(m_user.right().Is(0) || m_user.left().Is(0));
-
+// against {value}, depending on the condition.
+bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
+ Node* user, FlagsCondition cond, FlagsContinuation* cont) {
// Only handle branches and deoptimisations.
if (!cont->IsBranch() && !cont->IsDeoptimize()) return false;
switch (cond) {
case kSignedLessThan:
case kSignedGreaterThanOrEqual: {
+ // Here we handle sign tests, aka. comparisons with zero.
+ if (value != 0) return false;
// We don't generate TBZ/TBNZ for deoptimisations, as they have a
// shorter range than conditional branches and generating them for
// deoptimisations results in more veneers.
@@ -2045,9 +1972,29 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, Node* user,
return true;
}
case kEqual:
- case kNotEqual:
+ case kNotEqual: {
+ if (node->opcode() == IrOpcode::kWord32And) {
+ // Emit a tbz/tbnz if we are comparing with a single-bit mask:
+ // Branch(Word32Equal(Word32And(x, 1 << N), 1 << N), true, false)
+ Int32BinopMatcher m_and(node);
+ if (cont->IsBranch() && base::bits::IsPowerOfTwo(value) &&
+ m_and.right().Is(value) && selector->CanCover(user, node)) {
+ Arm64OperandGenerator g(selector);
+ // In the code generator, Equal refers to a bit being cleared. We want
+ // the opposite here so negate the condition.
+ cont->Negate();
+ selector->Emit(cont->Encode(kArm64TestAndBranch32), g.NoOutput(),
+ g.UseRegister(m_and.left().node()),
+ g.TempImmediate(base::bits::CountTrailingZeros(value)),
+ g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ return true;
+ }
+ }
+ } // Fall through.
case kUnsignedLessThanOrEqual:
case kUnsignedGreaterThan: {
+ if (value != 0) return false;
Arm64OperandGenerator g(selector);
cont->Overwrite(MapForCbz(cond));
EmitBranchOrDeoptimize(selector, kArm64CompareAndBranch32,
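
The new kEqual/kNotEqual arm handles comparisons of Word32And(x, 1 << N) against the mask itself by branching directly on bit N: CountTrailingZeros recovers N from the power-of-two mask, and the condition is negated first because for the code generator "equal" means the tested bit is clear. A sketch of the mask-to-bit-index step, with portable stand-ins for the base::bits helpers:

// Recovering the bit index N from the single-bit mask 1 << N.
#include <cassert>
#include <cstdint>
#include <cstdio>

bool IsPowerOfTwo(uint32_t v) { return v != 0 && (v & (v - 1)) == 0; }

int CountTrailingZeros(uint32_t v) {
  int n = 0;
  while ((v & 1) == 0) { v >>= 1; ++n; }
  return n;
}

int main() {
  uint32_t mask = uint32_t{1} << 13;  // the 1 << N in Word32And(x, 1 << N)
  assert(IsPowerOfTwo(mask));
  // The emitted instruction tests exactly this bit, e.g. a test-and-branch on
  // bit #13 of the input register.
  std::printf("test-and-branch on bit #%d\n", CountTrailingZeros(mask));
  return 0;
}
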
@@ -2062,15 +2009,20 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, Node* user,
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Int32BinopMatcher m(node);
- ArchOpcode opcode = kArm64Cmp32;
FlagsCondition cond = cont->condition();
- if (m.right().Is(0)) {
- if (TryEmitCbzOrTbz(selector, m.left().node(), node, cond, cont)) return;
- } else if (m.left().Is(0)) {
+ if (m.right().HasValue()) {
+ if (TryEmitCbzOrTbz(selector, m.left().node(), m.right().Value(), node,
+ cond, cont)) {
+ return;
+ }
+ } else if (m.left().HasValue()) {
FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
- if (TryEmitCbzOrTbz(selector, m.right().node(), node, commuted_cond, cont))
+ if (TryEmitCbzOrTbz(selector, m.right().node(), m.left().Value(), node,
+ commuted_cond, cont)) {
return;
+ }
}
+ ArchOpcode opcode = kArm64Cmp32;
ImmediateMode immediate_mode = kArithmeticImm;
if (m.right().Is(0) && (m.left().IsInt32Add() || m.left().IsWord32And())) {
// Emit flag setting add/and instructions for comparisons against zero.
@@ -2141,7 +2093,7 @@ bool TryEmitTestAndBranch(InstructionSelector* selector, Node* node,
Arm64OperandGenerator g(selector);
Matcher m(node);
if (cont->IsBranch() && m.right().HasValue() &&
- (base::bits::CountPopulation(m.right().Value()) == 1)) {
+ base::bits::IsPowerOfTwo(m.right().Value())) {
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont->condition() == kEqual) || (cont->condition() == kNotEqual));
selector->Emit(
@@ -2356,7 +2308,8 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
g.UseRegister(value), g.UseRegister(value),
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
DCHECK(cont->IsTrap());
selector->Emit(cont->Encode(kArm64Tst32), g.NoOutput(),
@@ -2376,14 +2329,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2648,7 +2601,7 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(right));
return;
}
- Emit(kArm64Float64InsertLowWord32, g.DefineAsRegister(node),
+ Emit(kArm64Float64InsertLowWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.UseRegister(right));
}
@@ -2665,7 +2618,7 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(left));
return;
}
- Emit(kArm64Float64InsertHighWord32, g.DefineAsRegister(node),
+ Emit(kArm64Float64InsertHighWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.UseRegister(right));
}
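
Using Ins on the .V2S() view overwrites a single 32-bit lane in place, which is why the selector switches to DefineSameAsFirst: the output register must already hold the incoming double whose other half is to be preserved. In bit-pattern terms the operation just replaces the low or high 32 bits of the float64's 64-bit image; a standalone sketch of that semantics (memcpy standing in for bit_cast):

// Bit-level semantics of Float64InsertLowWord32 / Float64InsertHighWord32:
// replace one 32-bit half of the double's bit pattern, keep the other half.
#include <cstdint>
#include <cstdio>
#include <cstring>

double InsertWord32(double input, uint32_t word, bool high) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));  // bit_cast<uint64_t>(input)
  if (high) {
    bits = (bits & 0x00000000FFFFFFFFull) | (uint64_t{word} << 32);
  } else {
    bits = (bits & 0xFFFFFFFF00000000ull) | word;
  }
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}

int main() {
  double d = 1.5;
  // Overwrite the high word (sign/exponent/upper mantissa) of 1.5, giving 2.0.
  std::printf("%g -> %g\n", d, InsertWord32(d, 0x40000000u, /*high=*/true));
  return 0;
}
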
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index 5406ec5766..53c3435b55 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -103,8 +103,9 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
// with the {control} node that already contains the right information.
ReplaceWithValue(node, dead(), effect, control);
} else {
- control = graph()->NewNode(common()->Deoptimize(p.kind(), p.reason()),
- frame_state, effect, control);
+ control = graph()->NewNode(
+ common()->Deoptimize(p.kind(), p.reason(), VectorSlotPair()),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), control);
Revisit(graph()->end());
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 7e1fbfddb3..54a924fce4 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -14,6 +14,7 @@
#include "src/interpreter/bytecodes.h"
#include "src/objects-inl.h"
#include "src/objects/literal-objects.h"
+#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -949,7 +950,7 @@ void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
+void BytecodeGraphBuilder::VisitStaGlobal() {
PrepareEagerCheckpoint();
Handle<Name> name =
Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
@@ -957,19 +958,13 @@ void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
Node* value = environment()->LookupAccumulator();
+ LanguageMode language_mode =
+ feedback.vector()->GetLanguageMode(feedback.slot());
const Operator* op = javascript()->StoreGlobal(language_mode, name, feedback);
Node* node = NewNode(op, value);
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitStaGlobalSloppy() {
- BuildStoreGlobal(LanguageMode::kSloppy);
-}
-
-void BytecodeGraphBuilder::VisitStaGlobalStrict() {
- BuildStoreGlobal(LanguageMode::kStrict);
-}
-
void BytecodeGraphBuilder::VisitStaDataPropertyInLiteral() {
PrepareEagerCheckpoint();
@@ -1609,7 +1604,8 @@ void BytecodeGraphBuilder::BuildCall(ConvertReceiverMode receiver_mode,
CallFrequency frequency = ComputeCallFrequency(slot_id);
const Operator* op =
- javascript()->Call(arg_count, frequency, feedback, receiver_mode);
+ javascript()->Call(arg_count, frequency, feedback, receiver_mode,
+ GetSpeculationMode(slot_id));
JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedCall(
op, args, static_cast<int>(arg_count), feedback.slot());
if (lowering.IsExit()) return;
@@ -1947,8 +1943,8 @@ void BytecodeGraphBuilder::VisitThrow() {
void BytecodeGraphBuilder::VisitAbort() {
BuildLoopExitsForFunctionExit(bytecode_analysis()->GetInLivenessFor(
bytecode_iterator().current_offset()));
- BailoutReason reason =
- static_cast<BailoutReason>(bytecode_iterator().GetIndexOperand(0));
+ AbortReason reason =
+ static_cast<AbortReason>(bytecode_iterator().GetIndexOperand(0));
NewNode(simplified()->RuntimeAbort(reason));
Node* control = NewNode(common()->Throw());
MergeControlToLeaveFunction(control);
@@ -2104,6 +2100,11 @@ CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
invocation_frequency_.value());
}
+SpeculationMode BytecodeGraphBuilder::GetSpeculationMode(int slot_id) const {
+ CallICNexus nexus(feedback_vector(), feedback_vector()->ToSlot(slot_id));
+ return nexus.GetSpeculationMode();
+}
+
void BytecodeGraphBuilder::VisitBitwiseNot() {
BuildUnaryOp(javascript()->BitwiseNot());
}
@@ -2574,7 +2575,7 @@ void BytecodeGraphBuilder::VisitSwitchOnSmiNoFeedback() {
PrepareEagerCheckpoint();
Node* acc = environment()->LookupAccumulator();
- Node* acc_smi = NewNode(simplified()->CheckSmi(), acc);
+ Node* acc_smi = NewNode(simplified()->CheckSmi(VectorSlotPair()), acc);
BuildSwitchOnSmi(acc_smi);
}
@@ -2670,7 +2671,9 @@ void BytecodeGraphBuilder::VisitForInNext() {
  // We need to rename the {index} here, as in the case of OSR we lose the
// information that the {index} is always a valid unsigned Smi value.
index = graph()->NewNode(common()->TypeGuard(Type::UnsignedSmall()), index,
+ environment()->GetEffectDependency(),
environment()->GetControlDependency());
+ environment()->UpdateEffectDependency(index);
FeedbackSlot slot =
feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(3));
@@ -2736,14 +2739,16 @@ void BytecodeGraphBuilder::VisitRestoreGeneratorState() {
environment()->BindAccumulator(state, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitRestoreGeneratorRegisters() {
+void BytecodeGraphBuilder::VisitResumeGenerator() {
Node* generator =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
+ interpreter::Register generator_state_reg =
+ bytecode_iterator().GetRegisterOperand(1);
+ interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(2);
  // We assume we are restoring registers starting from index 0.
CHECK_EQ(0, first_reg.index());
int register_count =
- static_cast<int>(bytecode_iterator().GetRegisterCountOperand(2));
+ static_cast<int>(bytecode_iterator().GetRegisterCountOperand(3));
// Bijection between registers and array indices must match that used in
// InterpreterAssembler::ExportRegisterFile.
@@ -2751,6 +2756,16 @@ void BytecodeGraphBuilder::VisitRestoreGeneratorRegisters() {
Node* value = NewNode(javascript()->GeneratorRestoreRegister(i), generator);
environment()->BindRegister(interpreter::Register(i), value);
}
+
+ // We're no longer resuming, so update the state register.
+ environment()->BindRegister(
+ generator_state_reg,
+ jsgraph()->SmiConstant(JSGeneratorObject::kGeneratorExecuting));
+
+ // Update the accumulator with the generator's input_or_debug_pos.
+ Node* input_or_debug_pos =
+ NewNode(javascript()->GeneratorRestoreInputOrDebugPos(), generator);
+ environment()->BindAccumulator(input_or_debug_pos);
}
void BytecodeGraphBuilder::VisitWide() {
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 562c3ddaea..91b857298c 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -16,6 +16,9 @@
namespace v8 {
namespace internal {
+
+class VectorSlotPair;
+
namespace compiler {
class Reduction;
@@ -152,7 +155,6 @@ class BytecodeGraphBuilder {
void BuildCreateArguments(CreateArgumentsType type);
Node* BuildLoadGlobal(Handle<Name> name, uint32_t feedback_slot_index,
TypeofMode typeof_mode);
- void BuildStoreGlobal(LanguageMode language_mode);
enum class StoreMode {
// Check the prototype chain before storing.
@@ -232,6 +234,10 @@ class BytecodeGraphBuilder {
// feedback.
CallFrequency ComputeCallFrequency(int slot_id) const;
+ // Helper function to extract the speculation mode from the recorded type
+ // feedback.
+ SpeculationMode GetSpeculationMode(int slot_id) const;
+
// Control flow plumbing.
void BuildJump();
void BuildJumpIf(Node* condition);
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index dd4197d466..330b19fac3 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -224,7 +224,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
// The target for C calls is always an address (i.e. machine pointer).
MachineType target_type = MachineType::Pointer();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
- CallDescriptor::Flags flags = CallDescriptor::kUseNativeStack;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
if (set_initialize_root_flag) {
flags |= CallDescriptor::kInitializeRootRegister;
}
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index f24cec64a7..071f8952db 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -245,7 +245,12 @@ TNode<IntPtrT> CodeAssembler::IntPtrConstant(intptr_t value) {
}
TNode<Number> CodeAssembler::NumberConstant(double value) {
- return UncheckedCast<Number>(raw_assembler()->NumberConstant(value));
+ int smi_value;
+ if (DoubleToSmiInteger(value, &smi_value)) {
+ return UncheckedCast<Number>(SmiConstant(smi_value));
+ } else {
+ return UncheckedCast<Number>(raw_assembler()->NumberConstant(value));
+ }
}
TNode<Smi> CodeAssembler::SmiConstant(Smi* value) {
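The rewritten NumberConstant only falls back to a heap-number constant when the value cannot be encoded as a Smi. A rough standalone sketch of that fits-a-Smi test follows, assuming 31-bit Smi payloads; the real DoubleToSmiInteger also covers the 32-bit-Smi configuration, so treat the range constants here as illustrative only.

// Editorial sketch, not V8 code: mirrors the check NumberConstant now makes
// via DoubleToSmiInteger before emitting a SmiConstant.
#include <cassert>
#include <cmath>
#include <cstdint>

bool DoubleFitsSmi(double value, int32_t* smi_value) {
  constexpr double kSmiMax = (1 << 30) - 1;  // assumed 31-bit Smi payload
  constexpr double kSmiMin = -(1 << 30);
  if (std::nearbyint(value) != value) return false;        // fractional values and NaN
  if (value == 0.0 && std::signbit(value)) return false;   // -0.0 needs a heap number
  if (value < kSmiMin || value > kSmiMax) return false;    // out of Smi range (and +/-inf)
  *smi_value = static_cast<int32_t>(value);
  return true;
}

int main() {
  int32_t smi;
  assert(DoubleFitsSmi(42.0, &smi) && smi == 42);  // would become a SmiConstant
  assert(!DoubleFitsSmi(0.5, &smi));               // stays a heap-number constant
  assert(!DoubleFitsSmi(-0.0, &smi));
  return 0;
}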
@@ -1357,13 +1362,13 @@ Node* CodeAssemblerVariable::value() const {
str << "#Use of unbound variable:"
<< "#\n Variable: " << *this << "#\n Current Block: ";
state_->PrintCurrentBlock(str);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
if (!state_->InsideBlock()) {
std::stringstream str;
str << "#Accessing variable value outside a block:"
<< "#\n Variable: " << *this;
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
#endif // DEBUG
return impl_->value_;
@@ -1456,7 +1461,7 @@ void CodeAssemblerLabel::MergeVariables() {
}
str << "\n# Current Block: ";
state_->PrintCurrentBlock(str);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
#endif // DEBUG
}
@@ -1472,7 +1477,7 @@ void CodeAssemblerLabel::Bind(AssemblerDebugInfo debug_info) {
str << "Cannot bind the same label twice:"
<< "\n# current: " << debug_info
<< "\n# previous: " << *label_->block();
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
state_->raw_assembler_->Bind(label_, debug_info);
UpdateVariablesAfterBind();
@@ -1524,7 +1529,7 @@ void CodeAssemblerLabel::UpdateVariablesAfterBind() {
<< " vs. found=" << (not_found ? 0 : i->second.size())
<< "\n# Variable: " << *var_impl
<< "\n# Current Block: " << *label_->block();
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
#endif // DEBUG
Node* phi = state_->raw_assembler_->Phi(
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 90a9d02fce..9f0d463dc1 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -17,6 +17,7 @@
#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/machine-type.h"
+#include "src/objects/data-handler.h"
#include "src/runtime/runtime.h"
#include "src/zone/zone-containers.h"
@@ -26,6 +27,10 @@ namespace internal {
class Callable;
class CallInterfaceDescriptor;
class Isolate;
+class JSCollection;
+class JSWeakCollection;
+class JSWeakMap;
+class JSWeakSet;
class Factory;
class Zone;
@@ -252,7 +257,7 @@ class Node;
class RawMachineAssembler;
class RawMachineLabel;
-typedef ZoneList<CodeAssemblerVariable*> CodeAssemblerVariableList;
+typedef ZoneVector<CodeAssemblerVariable*> CodeAssemblerVariableList;
typedef std::function<void()> CodeAssemblerCallback;
@@ -1062,6 +1067,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
bool Word32ShiftIsSafe() const;
private:
+ // These two don't have definitions and are here only for catching use cases
+ // where the cast is not necessary.
+ TNode<Int32T> Signed(TNode<Int32T> x);
+ TNode<Uint32T> Unsigned(TNode<Uint32T> x);
+
RawMachineAssembler* raw_assembler() const;
// Calls respective callback registered in the state.
@@ -1157,7 +1167,7 @@ class CodeAssemblerLabel {
CodeAssembler* assembler,
const CodeAssemblerVariableList& merged_variables,
CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred)
- : CodeAssemblerLabel(assembler, merged_variables.length(),
+ : CodeAssemblerLabel(assembler, merged_variables.size(),
&(merged_variables[0]), type) {}
CodeAssemblerLabel(
CodeAssembler* assembler, size_t count,
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 3d43ab4765..0fb38e5933 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -310,7 +310,10 @@ MaybeHandle<HandlerTable> CodeGenerator::GetHandlerTable() const {
}
Handle<Code> CodeGenerator::FinalizeCode() {
- if (result_ != kSuccess) return Handle<Code>();
+ if (result_ != kSuccess) {
+ tasm()->AbortedCodeGeneration();
+ return Handle<Code>();
+ }
// Allocate exception handler table.
Handle<HandlerTable> table = HandlerTable::Empty(isolate());
@@ -915,9 +918,17 @@ int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
FrameStateDescriptor* const descriptor = entry.descriptor();
frame_state_offset++;
- Translation translation(
- &translations_, static_cast<int>(descriptor->GetFrameCount()),
- static_cast<int>(descriptor->GetJSFrameCount()), zone());
+ int update_feedback_count = entry.feedback().IsValid() ? 1 : 0;
+ Translation translation(&translations_,
+ static_cast<int>(descriptor->GetFrameCount()),
+ static_cast<int>(descriptor->GetJSFrameCount()),
+ update_feedback_count, zone());
+ if (entry.feedback().IsValid()) {
+ DeoptimizationLiteral literal =
+ DeoptimizationLiteral(entry.feedback().vector());
+ int literal_id = DefineDeoptimizationLiteral(literal);
+ translation.AddUpdateFeedback(literal_id, entry.feedback().slot().ToInt());
+ }
InstructionOperandIterator iter(instr, frame_state_offset);
BuildTranslationForFrameStateDescriptor(descriptor, &iter, &translation,
state_combine);
@@ -1000,8 +1011,6 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
literal = DeoptimizationLiteral(isolate()->factory()->true_value());
}
} else {
- // TODO(jarin,bmeurer): We currently pass in raw pointers to the
- // JSFunction::entry here. We should really consider fixing this.
DCHECK(type == MachineType::Int32() ||
type == MachineType::Uint32() ||
type.representation() == MachineRepresentation::kWord32 ||
@@ -1019,8 +1028,6 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
case Constant::kInt64:
// When pointers are 8 bytes, we can use int64 constants to represent
// Smis.
- // TODO(jarin,bmeurer): We currently pass in raw pointers to the
- // JSFunction::entry here. We should really consider fixing this.
DCHECK(type.representation() == MachineRepresentation::kWord64 ||
type.representation() == MachineRepresentation::kTagged);
DCHECK_EQ(8, kPointerSize);
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 70fdf71578..d9bc5c8173 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -138,9 +138,10 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
if (condition->opcode() == IrOpcode::kBooleanNot) {
NodeProperties::ReplaceValueInput(node, condition->InputAt(0), 0);
NodeProperties::ChangeOp(
- node, condition_is_true
- ? common()->DeoptimizeIf(p.kind(), p.reason())
- : common()->DeoptimizeUnless(p.kind(), p.reason()));
+ node, condition_is_true ? common()->DeoptimizeIf(p.kind(), p.reason(),
+ VectorSlotPair())
+ : common()->DeoptimizeUnless(
+ p.kind(), p.reason(), VectorSlotPair()));
return Changed(node);
}
Decision const decision = DecideCondition(condition);
@@ -148,8 +149,9 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
if (condition_is_true == (decision == Decision::kTrue)) {
ReplaceWithValue(node, dead(), effect, control);
} else {
- control = graph()->NewNode(common()->Deoptimize(p.kind(), p.reason()),
- frame_state, effect, control);
+ control = graph()->NewNode(
+ common()->Deoptimize(p.kind(), p.reason(), VectorSlotPair()),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), control);
Revisit(graph()->end());
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index f43ff7e515..54af052d56 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -43,7 +43,8 @@ int ValueInputCountOfReturn(Operator const* const op) {
}
bool operator==(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
- return lhs.kind() == rhs.kind() && lhs.reason() == rhs.reason();
+ return lhs.kind() == rhs.kind() && lhs.reason() == rhs.reason() &&
+ lhs.feedback() == rhs.feedback();
}
bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
@@ -51,11 +52,15 @@ bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
}
size_t hash_value(DeoptimizeParameters p) {
- return base::hash_combine(p.kind(), p.reason());
+ return base::hash_combine(p.kind(), p.reason(), p.feedback());
}
std::ostream& operator<<(std::ostream& os, DeoptimizeParameters p) {
- return os << p.kind() << ":" << p.reason();
+ os << p.kind() << ":" << p.reason();
+ if (p.feedback().IsValid()) {
+ os << "; " << p.feedback();
+ }
+ return os;
}
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
@@ -343,8 +348,7 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
#define COMMON_CACHED_OP_LIST(V) \
V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1) \
- V(DeadValue, Operator::kFoldable, 0, 0, 0, 1, 0, 0) \
- V(Unreachable, Operator::kFoldable, 0, 1, 1, 0, 1, 0) \
+ V(Unreachable, Operator::kFoldable, 0, 1, 1, 1, 1, 0) \
V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
@@ -409,7 +413,6 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
#define CACHED_DEOPTIMIZE_LIST(V) \
V(Eager, MinusZero) \
- V(Eager, NoReason) \
V(Eager, WrongMap) \
V(Soft, InsufficientTypeFeedbackForGenericKeyedAccess) \
V(Soft, InsufficientTypeFeedbackForGenericNamedAccess)
@@ -424,7 +427,6 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
V(Eager, LostPrecision) \
V(Eager, LostPrecisionOrNaN) \
- V(Eager, NoReason) \
V(Eager, NotAHeapNumber) \
V(Eager, NotANumberOrOddball) \
V(Eager, NotASmi) \
@@ -606,7 +608,7 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"Deoptimize", // name
1, 1, 1, 0, 0, 1, // counts
- DeoptimizeParameters(kKind, kReason)) {} // parameter
+ DeoptimizeParameters(kKind, kReason, VectorSlotPair())) {}
};
#define CACHED_DEOPTIMIZE(Kind, Reason) \
DeoptimizeOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
@@ -622,7 +624,7 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeIf", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason)) {} // parameter
+ DeoptimizeParameters(kKind, kReason, VectorSlotPair())) {}
};
#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
@@ -639,7 +641,7 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeUnless", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason)) {} // parameter
+ DeoptimizeParameters(kKind, kReason, VectorSlotPair())) {}
};
#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
DeoptimizeUnlessOperator<DeoptimizeKind::k##Kind, \
@@ -817,17 +819,18 @@ const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
UNREACHABLE();
}
-const Operator* CommonOperatorBuilder::Deoptimize(DeoptimizeKind kind,
- DeoptimizeReason reason) {
-#define CACHED_DEOPTIMIZE(Kind, Reason) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason) { \
- return &cache_.kDeoptimize##Kind##Reason##Operator; \
+const Operator* CommonOperatorBuilder::Deoptimize(
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback) {
+#define CACHED_DEOPTIMIZE(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimize##Kind##Reason##Operator; \
}
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
// Uncached
- DeoptimizeParameters parameter(kind, reason);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return new (zone()) Operator1<DeoptimizeParameters>( // --
      IrOpcode::kDeoptimize,                            // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -836,17 +839,18 @@ const Operator* CommonOperatorBuilder::Deoptimize(DeoptimizeKind kind,
parameter); // parameter
}
-const Operator* CommonOperatorBuilder::DeoptimizeIf(DeoptimizeKind kind,
- DeoptimizeReason reason) {
-#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason) { \
- return &cache_.kDeoptimizeIf##Kind##Reason##Operator; \
+const Operator* CommonOperatorBuilder::DeoptimizeIf(
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback) {
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeIf##Kind##Reason##Operator; \
}
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
// Uncached
- DeoptimizeParameters parameter(kind, reason);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return new (zone()) Operator1<DeoptimizeParameters>( // --
IrOpcode::kDeoptimizeIf, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -856,16 +860,17 @@ const Operator* CommonOperatorBuilder::DeoptimizeIf(DeoptimizeKind kind,
}
const Operator* CommonOperatorBuilder::DeoptimizeUnless(
- DeoptimizeKind kind, DeoptimizeReason reason) {
-#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason) { \
- return &cache_.kDeoptimizeUnless##Kind##Reason##Operator; \
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback) {
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeUnless##Kind##Reason##Operator; \
}
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
// Uncached
- DeoptimizeParameters parameter(kind, reason);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return new (zone()) Operator1<DeoptimizeParameters>( // --
IrOpcode::kDeoptimizeUnless, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -1131,7 +1136,7 @@ const Operator* CommonOperatorBuilder::TypeGuard(Type* type) {
return new (zone()) Operator1<Type*>( // --
IrOpcode::kTypeGuard, Operator::kPure, // opcode
"TypeGuard", // name
- 1, 0, 1, 1, 0, 0, // counts
+ 1, 1, 1, 1, 1, 0, // counts
type); // parameter
}
@@ -1278,6 +1283,11 @@ uint32_t ObjectIdOf(Operator const* op) {
}
}
+MachineRepresentation DeadValueRepresentationOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kDeadValue, op->opcode());
+ return OpParameter<MachineRepresentation>(op);
+}
+
const Operator* CommonOperatorBuilder::FrameState(
BailoutId bailout_id, OutputFrameStateCombine state_combine,
const FrameStateFunctionInfo* function_info) {
@@ -1393,6 +1403,31 @@ CommonOperatorBuilder::CreateFrameStateFunctionInfo(
FrameStateFunctionInfo(type, parameter_count, local_count, shared_info);
}
+const Operator* CommonOperatorBuilder::DeadValue(MachineRepresentation rep) {
+ return new (zone()) Operator1<MachineRepresentation>( // --
+ IrOpcode::kDeadValue, Operator::kPure, // opcode
+ "DeadValue", // name
+ 1, 0, 0, 1, 0, 0, // counts
+ rep); // parameter
+}
+
+#undef COMMON_CACHED_OP_LIST
+#undef CACHED_RETURN_LIST
+#undef CACHED_END_LIST
+#undef CACHED_EFFECT_PHI_LIST
+#undef CACHED_INDUCTION_VARIABLE_PHI_LIST
+#undef CACHED_LOOP_LIST
+#undef CACHED_MERGE_LIST
+#undef CACHED_DEOPTIMIZE_LIST
+#undef CACHED_DEOPTIMIZE_IF_LIST
+#undef CACHED_DEOPTIMIZE_UNLESS_LIST
+#undef CACHED_TRAP_IF_LIST
+#undef CACHED_TRAP_UNLESS_LIST
+#undef CACHED_PARAMETER_LIST
+#undef CACHED_PHI_LIST
+#undef CACHED_PROJECTION_LIST
+#undef CACHED_STATE_VALUES_LIST
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 06541d9a38..0e0614dced 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -11,6 +11,7 @@
#include "src/deoptimize-reason.h"
#include "src/globals.h"
#include "src/machine-type.h"
+#include "src/vector-slot-pair.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone-handle-set.h"
@@ -52,15 +53,18 @@ int ValueInputCountOfReturn(Operator const* const op);
// Parameters for the {Deoptimize} operator.
class DeoptimizeParameters final {
public:
- DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason)
- : kind_(kind), reason_(reason) {}
+ DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback)
+ : kind_(kind), reason_(reason), feedback_(feedback) {}
DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
+ const VectorSlotPair& feedback() const { return feedback_; }
private:
DeoptimizeKind const kind_;
DeoptimizeReason const reason_;
+ VectorSlotPair const feedback_;
};
bool operator==(DeoptimizeParameters, DeoptimizeParameters);
@@ -338,6 +342,8 @@ ArgumentsStateType ArgumentsStateTypeOf(Operator const*) WARN_UNUSED_RESULT;
uint32_t ObjectIdOf(Operator const*);
+MachineRepresentation DeadValueRepresentationOf(Operator const*);
+
// Interface for building common operators that can be used at any level of IR,
// including JavaScript, mid-level, and low-level.
class V8_EXPORT_PRIVATE CommonOperatorBuilder final
@@ -346,7 +352,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
explicit CommonOperatorBuilder(Zone* zone);
const Operator* Dead();
- const Operator* DeadValue();
+ const Operator* DeadValue(MachineRepresentation rep);
const Operator* Unreachable();
const Operator* End(size_t control_input_count);
const Operator* Branch(BranchHint = BranchHint::kNone);
@@ -358,10 +364,12 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* IfValue(int32_t value);
const Operator* IfDefault();
const Operator* Throw();
- const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason);
- const Operator* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason);
- const Operator* DeoptimizeUnless(DeoptimizeKind kind,
- DeoptimizeReason reason);
+ const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback);
+ const Operator* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback);
+ const Operator* DeoptimizeUnless(DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback);
const Operator* TrapIf(int32_t trap_id);
const Operator* TrapUnless(int32_t trap_id);
const Operator* Return(int value_input_count = 1);
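With the extra VectorSlotPair parameter, callers that have concrete type feedback attach it to the deoptimization operator, while callers without feedback pass an empty pair, which is also what keeps the cached operator table applicable. A hedged fragment of a caller, where vector and slot are assumed inputs not taken from this diff:

// Sketch only: an invalid (default-constructed) VectorSlotPair still hits the
// cached operators; a valid pair forces the uncached Operator1 allocation.
VectorSlotPair no_feedback;  // !IsValid()
const Operator* cached = common()->DeoptimizeUnless(
    DeoptimizeKind::kEager, DeoptimizeReason::kLostPrecision, no_feedback);

VectorSlotPair feedback(vector, slot);  // assumed FeedbackVector handle and slot
const Operator* fresh = common()->DeoptimizeUnless(
    DeoptimizeKind::kEager, DeoptimizeReason::kLostPrecision, feedback);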
diff --git a/deps/v8/src/compiler/dead-code-elimination.cc b/deps/v8/src/compiler/dead-code-elimination.cc
index d40bc37b6d..523d37fe29 100644
--- a/deps/v8/src/compiler/dead-code-elimination.cc
+++ b/deps/v8/src/compiler/dead-code-elimination.cc
@@ -21,10 +21,8 @@ DeadCodeElimination::DeadCodeElimination(Editor* editor, Graph* graph,
graph_(graph),
common_(common),
dead_(graph->NewNode(common->Dead())),
- dead_value_(graph->NewNode(common->DeadValue())),
zone_(temp_zone) {
NodeProperties::SetType(dead_, Type::None());
- NodeProperties::SetType(dead_value_, Type::None());
}
namespace {
@@ -38,11 +36,11 @@ bool NoReturn(Node* node) {
NodeProperties::GetTypeOrAny(node)->IsNone();
}
-bool HasDeadInput(Node* node) {
+Node* FindDeadInput(Node* node) {
for (Node* input : node->inputs()) {
- if (NoReturn(input)) return true;
+ if (NoReturn(input)) return input;
}
- return false;
+ return nullptr;
}
} // namespace
@@ -209,17 +207,27 @@ Reduction DeadCodeElimination::ReducePhi(Node* node) {
DCHECK_EQ(IrOpcode::kPhi, node->opcode());
Reduction reduction = PropagateDeadControl(node);
if (reduction.Changed()) return reduction;
- if (PhiRepresentationOf(node->op()) == MachineRepresentation::kNone ||
+ MachineRepresentation rep = PhiRepresentationOf(node->op());
+ if (rep == MachineRepresentation::kNone ||
NodeProperties::GetTypeOrAny(node)->IsNone()) {
- return Replace(dead_value());
+ return Replace(DeadValue(node, rep));
+ }
+ int input_count = node->op()->ValueInputCount();
+ for (int i = 0; i < input_count; ++i) {
+ Node* input = NodeProperties::GetValueInput(node, i);
+ if (input->opcode() == IrOpcode::kDeadValue &&
+ DeadValueRepresentationOf(input->op()) != rep) {
+ NodeProperties::ReplaceValueInput(node, DeadValue(input, rep), i);
+ }
}
return NoChange();
}
Reduction DeadCodeElimination::ReducePureNode(Node* node) {
DCHECK_EQ(0, node->op()->EffectInputCount());
- if (HasDeadInput(node)) {
- return Replace(dead_value());
+ if (node->opcode() == IrOpcode::kDeadValue) return NoChange();
+ if (Node* input = FindDeadInput(node)) {
+ return Replace(DeadValue(input));
}
return NoChange();
}
@@ -234,8 +242,7 @@ Reduction DeadCodeElimination::ReduceUnreachableOrIfException(Node* node) {
return Replace(effect);
}
if (effect->opcode() == IrOpcode::kUnreachable) {
- RelaxEffectsAndControls(node);
- return Replace(dead_value());
+ return Replace(effect);
}
return NoChange();
}
@@ -246,10 +253,10 @@ Reduction DeadCodeElimination::ReduceEffectNode(Node* node) {
if (effect->opcode() == IrOpcode::kDead) {
return Replace(effect);
}
- if (HasDeadInput(node)) {
+ if (Node* input = FindDeadInput(node)) {
if (effect->opcode() == IrOpcode::kUnreachable) {
RelaxEffectsAndControls(node);
- return Replace(dead_value());
+ return Replace(DeadValue(input));
}
Node* control = node->op()->ControlInputCount() == 1
@@ -257,7 +264,8 @@ Reduction DeadCodeElimination::ReduceEffectNode(Node* node) {
: graph()->start();
Node* unreachable =
graph()->NewNode(common()->Unreachable(), effect, control);
- ReplaceWithValue(node, dead_value(), node, control);
+ NodeProperties::SetType(unreachable, Type::None());
+ ReplaceWithValue(node, DeadValue(input), node, control);
return Replace(unreachable);
}
@@ -270,11 +278,12 @@ Reduction DeadCodeElimination::ReduceDeoptimizeOrReturnOrTerminate(Node* node) {
node->opcode() == IrOpcode::kTerminate);
Reduction reduction = PropagateDeadControl(node);
if (reduction.Changed()) return reduction;
- if (HasDeadInput(node)) {
+ if (FindDeadInput(node) != nullptr) {
Node* effect = NodeProperties::GetEffectInput(node, 0);
Node* control = NodeProperties::GetControlInput(node, 0);
if (effect->opcode() != IrOpcode::kUnreachable) {
effect = graph()->NewNode(common()->Unreachable(), effect, control);
+ NodeProperties::SetType(effect, Type::None());
}
node->TrimInputCount(2);
node->ReplaceInput(0, effect);
@@ -322,6 +331,16 @@ void DeadCodeElimination::TrimMergeOrPhi(Node* node, int size) {
NodeProperties::ChangeOp(node, op);
}
+Node* DeadCodeElimination::DeadValue(Node* node, MachineRepresentation rep) {
+ if (node->opcode() == IrOpcode::kDeadValue) {
+ if (rep == DeadValueRepresentationOf(node->op())) return node;
+ node = NodeProperties::GetValueInput(node, 0);
+ }
+ Node* dead_value = graph()->NewNode(common()->DeadValue(rep), node);
+ NodeProperties::SetType(dead_value, Type::None());
+ return dead_value;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/dead-code-elimination.h b/deps/v8/src/compiler/dead-code-elimination.h
index b1e403ca86..217d58ef31 100644
--- a/deps/v8/src/compiler/dead-code-elimination.h
+++ b/deps/v8/src/compiler/dead-code-elimination.h
@@ -8,6 +8,7 @@
#include "src/base/compiler-specific.h"
#include "src/compiler/graph-reducer.h"
#include "src/globals.h"
+#include "src/machine-type.h"
namespace v8 {
namespace internal {
@@ -17,13 +18,23 @@ namespace compiler {
class CommonOperatorBuilder;
// Propagates {Dead} control and {DeadValue} values through the graph and
-// thereby removes dead code. When {DeadValue} hits the effect chain, a crashing
-// {Unreachable} node is inserted and the rest of the effect chain is collapsed.
-// We wait for the {EffectControlLinearizer} to connect {Unreachable} nodes to
-// the graph end, since this is much easier if there is no floating control.
-// We detect dead values based on types, pruning uses of DeadValue except for
-// uses by phi. These remaining uses are eliminated in the
-// {EffectControlLinearizer}, where they are replaced with dummy values.
+// thereby removes dead code.
+// We detect dead values based on types, replacing uses of nodes with
+// {Type::None()} with {DeadValue}. A pure node (other than a phi) using
+// {DeadValue} is replaced by {DeadValue}. When {DeadValue} hits the effect
+// chain, a crashing {Unreachable} node is inserted and the rest of the effect
+// chain is collapsed. We wait for the {EffectControlLinearizer} to connect
+// {Unreachable} nodes to the graph end, since this is much easier if there is
+// no floating control.
+// {DeadValue} has an input, which has to have {Type::None()}. This input is
+// important to maintain the dependency on the cause of the unreachable code.
+// {Unreachable} has a value output and {Type::None()} so it can be used by
+// {DeadValue}.
+// {DeadValue} nodes track a {MachineRepresentation} so they can be lowered to a
+// value-producing node. {DeadValue} has the runtime semantics of crashing and
+// behaves like a constant of its representation so it can be used in gap moves.
+// Since phi nodes are the only remaining use of {DeadValue}, this
+// representation is only adjusted for uses by phi nodes.
// In contrast to {DeadValue}, {Dead} can never remain in the graph.
class V8_EXPORT_PRIVATE DeadCodeElimination final
: public NON_EXPORTED_BASE(AdvancedReducer) {
@@ -53,15 +64,16 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
void TrimMergeOrPhi(Node* node, int size);
+ Node* DeadValue(Node* none_node,
+ MachineRepresentation rep = MachineRepresentation::kNone);
+
Graph* graph() const { return graph_; }
CommonOperatorBuilder* common() const { return common_; }
Node* dead() const { return dead_; }
- Node* dead_value() const { return dead_value_; }
Graph* const graph_;
CommonOperatorBuilder* const common_;
Node* const dead_;
- Node* const dead_value_;
Zone* zone_;
DISALLOW_COPY_AND_ASSIGN(DeadCodeElimination);
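As the new header comment explains, a {DeadValue} keeps a {Type::None()}-typed input and a machine representation. A hedged fragment mirroring the DeadValue() helper added in dead-code-elimination.cc, with none_typed_input standing in for whatever unreachable node caused the replacement:

// Sketch only: construct a DeadValue of a given representation whose input
// records the cause of unreachability; both are typed Type::None().
Node* dead_value =
    graph()->NewNode(common()->DeadValue(MachineRepresentation::kTagged),
                     none_typed_input);
NodeProperties::SetType(dead_value, Type::None());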
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 2372a0fe40..a47941e28d 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -145,9 +145,10 @@ bool HasIncomingBackEdges(BasicBlock* block) {
return false;
}
-void RemoveRegionNode(Node* node) {
+void RemoveRenameNode(Node* node) {
DCHECK(IrOpcode::kFinishRegion == node->opcode() ||
- IrOpcode::kBeginRegion == node->opcode());
+ IrOpcode::kBeginRegion == node->opcode() ||
+ IrOpcode::kTypeGuard == node->opcode());
// Update the value/context uses to the value input of the finish node and
// the effect uses to the effect input.
for (Edge edge : node->use_edges()) {
@@ -318,28 +319,6 @@ void TryCloneBranch(Node* node, BasicBlock* block, Zone* temp_zone,
merge->Kill();
}
-Node* DummyValue(JSGraph* jsgraph, MachineRepresentation rep) {
- switch (rep) {
- case MachineRepresentation::kTagged:
- case MachineRepresentation::kTaggedSigned:
- return jsgraph->SmiConstant(0xdead);
- case MachineRepresentation::kTaggedPointer:
- return jsgraph->TheHoleConstant();
- case MachineRepresentation::kWord64:
- return jsgraph->Int64Constant(0xdead);
- case MachineRepresentation::kWord32:
- return jsgraph->Int32Constant(0xdead);
- case MachineRepresentation::kFloat64:
- return jsgraph->Float64Constant(0xdead);
- case MachineRepresentation::kFloat32:
- return jsgraph->Float32Constant(0xdead);
- case MachineRepresentation::kBit:
- return jsgraph->Int32Constant(0);
- default:
- UNREACHABLE();
- }
-}
-
} // namespace
void EffectControlLinearizer::Run() {
@@ -369,7 +348,6 @@ void EffectControlLinearizer::Run() {
// Iterate over the phis and update the effect phis.
Node* effect_phi = nullptr;
Node* terminate = nullptr;
- int predecessor_count = static_cast<int>(block->PredecessorCount());
for (; instr < block->NodeCount(); instr++) {
Node* node = block->NodeAt(instr);
// Only go through the phis and effect phis.
@@ -380,19 +358,7 @@ void EffectControlLinearizer::Run() {
DCHECK_NE(IrOpcode::kIfException, control->opcode());
effect_phi = node;
} else if (node->opcode() == IrOpcode::kPhi) {
- DCHECK_EQ(predecessor_count, node->op()->ValueInputCount());
- for (int i = 0; i < predecessor_count; ++i) {
- if (NodeProperties::GetValueInput(node, i)->opcode() ==
- IrOpcode::kDeadValue) {
- // Phi uses of {DeadValue} must originate from unreachable code. Due
- // to schedule freedom between the effect and the control chain,
- // they might still appear in reachable code. So we replace them
- // with a dummy value.
- NodeProperties::ReplaceValueInput(
- node, DummyValue(jsgraph(), PhiRepresentationOf(node->op())),
- i);
- }
- }
+ // Just skip phis.
} else if (node->opcode() == IrOpcode::kTerminate) {
DCHECK_NULL(terminate);
terminate = node;
@@ -573,7 +539,7 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
region_observability_ = RegionObservability::kObservable;
// Update the value uses to the value input of the finish node and
// the effect uses to the effect input.
- return RemoveRegionNode(node);
+ return RemoveRenameNode(node);
}
if (node->opcode() == IrOpcode::kBeginRegion) {
// Determine the observability for this region and use that for all
@@ -583,7 +549,10 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
region_observability_ = RegionObservabilityOf(node->op());
// Update the value uses to the value input of the finish node and
// the effect uses to the effect input.
- return RemoveRegionNode(node);
+ return RemoveRenameNode(node);
+ }
+ if (node->opcode() == IrOpcode::kTypeGuard) {
+ return RemoveRenameNode(node);
}
// Special treatment for checkpoint nodes.
@@ -781,6 +750,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckedTruncateTaggedToWord32:
result = LowerCheckedTruncateTaggedToWord32(node, frame_state);
break;
+ case IrOpcode::kNumberToString:
+ result = LowerNumberToString(node);
+ break;
case IrOpcode::kObjectIsArrayBufferView:
result = LowerObjectIsArrayBufferView(node);
break;
@@ -847,12 +819,17 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kNewArgumentsElements:
result = LowerNewArgumentsElements(node);
break;
+ case IrOpcode::kNewConsString:
+ result = LowerNewConsString(node);
+ break;
case IrOpcode::kArrayBufferWasNeutered:
result = LowerArrayBufferWasNeutered(node);
break;
case IrOpcode::kSameValue:
result = LowerSameValue(node);
break;
+ case IrOpcode::kDeadValue:
+        result = LowerDeadValue(node);
+        break;
case IrOpcode::kStringFromCharCode:
result = LowerStringFromCharCode(node);
break;
@@ -862,6 +839,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStringIndexOf:
result = LowerStringIndexOf(node);
break;
+ case IrOpcode::kStringLength:
+ result = LowerStringLength(node);
+ break;
case IrOpcode::kStringToNumber:
result = LowerStringToNumber(node);
break;
@@ -874,6 +854,12 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kSeqStringCharCodeAt:
result = LowerSeqStringCharCodeAt(node);
break;
+ case IrOpcode::kStringCodePointAt:
+ result = LowerStringCodePointAt(node);
+ break;
+ case IrOpcode::kSeqStringCodePointAt:
+ result = LowerSeqStringCharCodeAt(node);
+ break;
case IrOpcode::kStringToLowerCaseIntl:
result = LowerStringToLowerCaseIntl(node);
break;
@@ -889,6 +875,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStringLessThanOrEqual:
result = LowerStringLessThanOrEqual(node);
break;
+ case IrOpcode::kNumberIsFloat64Hole:
+ result = LowerNumberIsFloat64Hole(node);
+ break;
case IrOpcode::kCheckFloat64Hole:
result = LowerCheckFloat64Hole(node, frame_state);
break;
@@ -1136,6 +1125,7 @@ void EffectControlLinearizer::TruncateTaggedPointerToBit(
Node* value = node->InputAt(0);
auto if_heapnumber = __ MakeDeferredLabel();
+ auto if_bigint = __ MakeDeferredLabel();
Node* zero = __ Int32Constant(0);
Node* fzero = __ Float64Constant(0.0);
@@ -1154,15 +1144,22 @@ void EffectControlLinearizer::TruncateTaggedPointerToBit(
Node* value_map_bitfield =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
__ GotoIfNot(
- __ Word32Equal(__ Word32And(value_map_bitfield,
- __ Int32Constant(1 << Map::kIsUndetectable)),
- zero),
+ __ Word32Equal(
+ __ Word32And(value_map_bitfield,
+ __ Int32Constant(Map::IsUndetectableBit::kMask)),
+ zero),
done, zero);
// Check if {value} is a HeapNumber.
__ GotoIf(__ WordEqual(value_map, __ HeapNumberMapConstant()),
&if_heapnumber);
+ // Check if {value} is a BigInt.
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+ __ GotoIf(__ Word32Equal(value_instance_type, __ Int32Constant(BIGINT_TYPE)),
+ &if_bigint);
+
// All other values that reach here are true.
__ Goto(done, __ Int32Constant(1));
@@ -1174,6 +1171,15 @@ void EffectControlLinearizer::TruncateTaggedPointerToBit(
__ LoadField(AccessBuilder::ForHeapNumberValue(), value);
__ Goto(done, __ Float64LessThan(fzero, __ Float64Abs(value_value)));
}
+
+ __ Bind(&if_bigint);
+ {
+ Node* bitfield = __ LoadField(AccessBuilder::ForBigIntBitfield(), value);
+ Node* length_is_zero = __ WordEqual(
+ __ WordAnd(bitfield, __ IntPtrConstant(BigInt::LengthBits::kMask)),
+ __ IntPtrConstant(0));
+ __ Goto(done, __ Word32Equal(length_is_zero, zero));
+ }
}
Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
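The new if_bigint branch encodes the ToBoolean rule for BigInts: only a BigInt with digit length zero (the value 0n) is falsy. A small standalone sketch of that rule; the bitfield mask here is an assumption, standing in for BigInt::LengthBits::kMask.

// Editorial sketch, not V8 code: a BigInt's truthiness depends only on
// whether the length packed into its bitfield is zero.
#include <cassert>
#include <cstdint>

constexpr uint32_t kAssumedLengthMask = 0x7FFFFFFF;  // stand-in for BigInt::LengthBits::kMask

bool BigIntToBoolean(uint32_t bitfield) {
  return (bitfield & kAssumedLengthMask) != 0;  // non-zero length => truthy
}

int main() {
  assert(!BigIntToBoolean(0));  // 0n is falsy
  assert(BigIntToBoolean(3));   // any BigInt with at least one digit is truthy
  return 0;
}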
@@ -1294,9 +1300,11 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) {
Node* EffectControlLinearizer::LowerCheckBounds(Node* node, Node* frame_state) {
Node* index = node->InputAt(0);
Node* limit = node->InputAt(1);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* check = __ Uint32LessThan(index, limit);
- __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds, params.feedback(), check,
+ frame_state);
return index;
}
@@ -1305,9 +1313,12 @@ Node* EffectControlLinearizer::LowerMaskIndexWithBound(Node* node) {
if (mask_array_index_ == kMaskArrayIndex) {
Node* limit = node->InputAt(1);
- Node* mask = __ Word32Sar(__ Word32Or(__ Int32Sub(limit, index), index),
- __ Int32Constant(31));
- mask = __ Word32Xor(mask, __ Int32Constant(-1));
+ // mask = ((index - limit) & ~index) >> 31
+ // index = index & mask
+ Node* neg_index = __ Word32Xor(index, __ Int32Constant(-1));
+ Node* mask =
+ __ Word32Sar(__ Word32And(__ Int32Sub(index, limit), neg_index),
+ __ Int32Constant(31));
index = __ Word32And(index, mask);
}
return index;
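The rewritten mask matches the comment above: for an in-bounds index the sign bit of (index - limit) & ~index is set, so the arithmetic shift produces an all-ones mask and the index is unchanged; otherwise the mask is zero and the index collapses to 0. A standalone sketch, assuming two's-complement arithmetic right shift (what Word32Sar provides) and index/limit values below 2^31:

// Editorial sketch of the branchless index mask from LowerMaskIndexWithBound.
#include <cassert>
#include <cstdint>

uint32_t MaskIndexWithBound(uint32_t index, uint32_t limit) {
  int32_t diff = static_cast<int32_t>(index - limit);
  int32_t not_index = ~static_cast<int32_t>(index);
  int32_t mask = (diff & not_index) >> 31;  // all ones if in bounds, else zero
  return index & static_cast<uint32_t>(mask);
}

int main() {
  assert(MaskIndexWithBound(2, 5) == 2);  // in bounds: unchanged
  assert(MaskIndexWithBound(5, 5) == 0);  // index == limit is out of bounds
  assert(MaskIndexWithBound(7, 5) == 0);  // out of bounds: masked to 0
  return 0;
}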
@@ -1346,10 +1357,11 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
Node* bitfield3 =
__ LoadField(AccessBuilder::ForMapBitField3(), value_map);
Node* if_not_deprecated = __ WordEqual(
- __ Word32And(bitfield3, __ Int32Constant(Map::Deprecated::kMask)),
+ __ Word32And(bitfield3,
+ __ Int32Constant(Map::IsDeprecatedBit::kMask)),
__ Int32Constant(0));
- __ DeoptimizeIf(DeoptimizeReason::kWrongMap, if_not_deprecated,
- frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kWrongMap, p.feedback(),
+ if_not_deprecated, frame_state);
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kTryMigrateInstance;
@@ -1360,8 +1372,8 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
__ ExternalConstant(ExternalReference(id, isolate())),
__ Int32Constant(1), __ NoContextConstant());
Node* check = ObjectIsSmi(result);
- __ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, check,
- frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, p.feedback(),
+ check, frame_state);
}
// Reload the current map of the {value}.
@@ -1372,7 +1384,8 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
Node* map = __ HeapConstant(maps[i]);
Node* check = __ WordEqual(value_map, map);
if (i == map_count - 1) {
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
+ frame_state);
} else {
__ GotoIf(check, &done);
}
@@ -1390,7 +1403,8 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
Node* map = __ HeapConstant(maps[i]);
Node* check = __ WordEqual(value_map, map);
if (i == map_count - 1) {
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
+ frame_state);
} else {
__ GotoIf(check, &done);
}
@@ -1423,6 +1437,7 @@ Node* EffectControlLinearizer::LowerCompareMaps(Node* node) {
Node* EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
auto if_not_smi = __ MakeDeferredLabel();
auto done = __ MakeLabel();
@@ -1434,7 +1449,8 @@ Node* EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state) {
__ Bind(&if_not_smi);
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* check1 = __ WordEqual(value_map, __ HeapNumberMapConstant());
- __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, check1, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, params.feedback(),
+ check1, frame_state);
__ Goto(&done);
__ Bind(&done);
@@ -1452,8 +1468,8 @@ Node* EffectControlLinearizer::LowerCheckReceiver(Node* node,
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Node* check = __ Uint32LessThanOrEqual(
__ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
- __ DeoptimizeIfNot(DeoptimizeReason::kNotAJavaScriptObject, check,
- frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotAJavaScriptObject, VectorSlotPair(),
+ check, frame_state);
return value;
}
@@ -1464,12 +1480,14 @@ Node* EffectControlLinearizer::LowerCheckSymbol(Node* node, Node* frame_state) {
Node* check =
__ WordEqual(value_map, __ HeapConstant(factory()->symbol_map()));
- __ DeoptimizeIfNot(DeoptimizeReason::kNotASymbol, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotASymbol, VectorSlotPair(), check,
+ frame_state);
return value;
}
Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* value_instance_type =
@@ -1477,7 +1495,8 @@ Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
Node* check = __ Uint32LessThan(value_instance_type,
__ Uint32Constant(FIRST_NONSTRING_TYPE));
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, params.feedback(),
+ check, frame_state);
return value;
}
@@ -1494,7 +1513,8 @@ Node* EffectControlLinearizer::LowerCheckSeqString(Node* node,
value_instance_type,
__ Int32Constant(kStringRepresentationMask | kIsNotStringMask)),
__ Int32Constant(kSeqStringTag | kStringTag));
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, VectorSlotPair(),
+ check, frame_state);
return value;
}
@@ -1510,7 +1530,8 @@ Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
__ Word32And(value_instance_type,
__ Int32Constant(kIsNotStringMask | kIsNotInternalizedMask)),
__ Int32Constant(kInternalizedTag));
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, VectorSlotPair(),
+ check, frame_state);
return value;
}
@@ -1518,7 +1539,7 @@ Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
void EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
__ DeoptimizeIfNot(DeoptimizeKind::kEager, DeoptimizeReasonOf(node->op()),
- value, frame_state);
+ VectorSlotPair(), value, frame_state);
}
Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node,
@@ -1528,7 +1549,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node,
Node* value = __ Int32AddWithOverflow(lhs, rhs);
Node* check = __ Projection(1, value);
- __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), check,
+ frame_state);
return __ Projection(0, value);
}
@@ -1539,7 +1561,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Sub(Node* node,
Node* value = __ Int32SubWithOverflow(lhs, rhs);
Node* check = __ Projection(1, value);
- __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), check,
+ frame_state);
return __ Projection(0, value);
}
@@ -1567,11 +1590,13 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
// Check if {rhs} is zero.
Node* check = __ Word32Equal(rhs, zero);
- __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check,
+ frame_state);
// Check if {lhs} is zero, as that would produce minus zero.
check = __ Word32Equal(lhs, zero);
- __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(), check,
+ frame_state);
// Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
// to return -kMinInt, which is not representable.
@@ -1584,7 +1609,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
// Check if {rhs} is -1.
Node* minusone = __ Int32Constant(-1);
Node* is_minus_one = __ Word32Equal(rhs, minusone);
- __ DeoptimizeIf(DeoptimizeReason::kOverflow, is_minus_one, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), is_minus_one,
+ frame_state);
__ Goto(&minint_check_done);
__ Bind(&minint_check_done);
@@ -1597,7 +1623,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
// Check if the remainder is non-zero.
Node* check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
- __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(), check,
+ frame_state);
return value;
}
@@ -1645,7 +1672,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
Node* check = __ Word32Equal(vtrue0, zero);
- __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check,
+ frame_state);
__ Goto(&rhs_checked, vtrue0);
}
@@ -1679,7 +1707,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
// Check if we would have to return -0.
Node* check = __ Word32Equal(vtrue1, zero);
- __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(), check,
+ frame_state);
__ Goto(&done, vtrue1);
}
@@ -1696,14 +1725,16 @@ Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node,
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
Node* check = __ Word32Equal(rhs, zero);
- __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check,
+ frame_state);
// Perform the actual unsigned integer division.
Node* value = __ Uint32Div(lhs, rhs);
// Check if the remainder is non-zero.
check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
- __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(), check,
+ frame_state);
return value;
}
@@ -1716,7 +1747,8 @@ Node* EffectControlLinearizer::LowerCheckedUint32Mod(Node* node,
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
Node* check = __ Word32Equal(rhs, zero);
- __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check,
+ frame_state);
// Perform the actual unsigned integer modulus.
return __ Uint32Mod(lhs, rhs);
@@ -1730,7 +1762,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
Node* projection = __ Int32MulWithOverflow(lhs, rhs);
Node* check = __ Projection(1, projection);
- __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), check,
+ frame_state);
Node* value = __ Projection(0, projection);
@@ -1745,7 +1778,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
__ Bind(&if_zero);
// We may need to return negative zero.
Node* check_or = __ Int32LessThan(__ Word32Or(lhs, rhs), zero);
- __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check_or, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(), check_or,
+ frame_state);
__ Goto(&check_done);
__ Bind(&check_done);
@@ -1758,35 +1792,42 @@ Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(
Node* node, Node* frame_state) {
DCHECK(SmiValuesAre31Bits());
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* add = __ Int32AddWithOverflow(value, value);
Node* check = __ Projection(1, add);
- __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, params.feedback(), check,
+ frame_state);
return __ Projection(0, add);
}
Node* EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* unsafe = __ Int32LessThan(value, __ Int32Constant(0));
- __ DeoptimizeIf(DeoptimizeReason::kLostPrecision, unsafe, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kLostPrecision, params.feedback(), unsafe,
+ frame_state);
return value;
}
Node* EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(
Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* check = __ Uint32LessThanOrEqual(value, SmiMaxValueConstant());
- __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, params.feedback(), check,
+ frame_state);
return ChangeUint32ToSmi(value);
}
Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32(
- CheckForMinusZeroMode mode, Node* value, Node* frame_state) {
+ CheckForMinusZeroMode mode, const VectorSlotPair& feedback, Node* value,
+ Node* frame_state) {
Node* value32 = __ RoundFloat64ToInt32(value);
Node* check_same = __ Float64Equal(value, __ ChangeInt32ToFloat64(value32));
- __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecisionOrNaN, check_same,
- frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecisionOrNaN, feedback,
+ check_same, frame_state);
if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
// Check if {value} is -0.
@@ -1801,7 +1842,8 @@ Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32(
// In case of 0, we need to check the high bits for the IEEE -0 pattern.
Node* check_negative = __ Int32LessThan(__ Float64ExtractHighWord32(value),
__ Int32Constant(0));
- __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check_negative, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, feedback, check_negative,
+ frame_state);
__ Goto(&check_done);
__ Bind(&check_done);
@@ -1811,22 +1853,27 @@ Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32(
Node* EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
Node* frame_state) {
- CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
+ const CheckMinusZeroParameters& params =
+ CheckMinusZeroParametersOf(node->op());
Node* value = node->InputAt(0);
- return BuildCheckedFloat64ToInt32(mode, value, frame_state);
+ return BuildCheckedFloat64ToInt32(params.mode(), params.feedback(), value,
+ frame_state);
}
Node* EffectControlLinearizer::LowerCheckedTaggedSignedToInt32(
Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* check = ObjectIsSmi(value);
- __ DeoptimizeIfNot(DeoptimizeReason::kNotASmi, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotASmi, params.feedback(), check,
+ frame_state);
return ChangeSmiToInt32(value);
}
Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
Node* frame_state) {
- CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
+ const CheckMinusZeroParameters& params =
+ CheckMinusZeroParametersOf(node->op());
Node* value = node->InputAt(0);
auto if_not_smi = __ MakeDeferredLabel();
@@ -1842,9 +1889,11 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
__ Bind(&if_not_smi);
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* check_map = __ WordEqual(value_map, __ HeapNumberMapConstant());
- __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, check_map, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, params.feedback(),
+ check_map, frame_state);
Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
- vfalse = BuildCheckedFloat64ToInt32(mode, vfalse, frame_state);
+ vfalse = BuildCheckedFloat64ToInt32(params.mode(), params.feedback(), vfalse,
+ frame_state);
__ Goto(&done, vfalse);
__ Bind(&done);
@@ -1852,13 +1901,14 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
}
Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
- CheckTaggedInputMode mode, Node* value, Node* frame_state) {
+ CheckTaggedInputMode mode, const VectorSlotPair& feedback, Node* value,
+ Node* frame_state) {
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* check_number = __ WordEqual(value_map, __ HeapNumberMapConstant());
switch (mode) {
case CheckTaggedInputMode::kNumber: {
- __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, check_number,
- frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, feedback,
+ check_number, frame_state);
break;
}
case CheckTaggedInputMode::kNumberOrOddball: {
@@ -1871,8 +1921,8 @@ Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
__ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
Node* check_oddball =
__ Word32Equal(instance_type, __ Int32Constant(ODDBALL_TYPE));
- __ DeoptimizeIfNot(DeoptimizeReason::kNotANumberOrOddball, check_oddball,
- frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotANumberOrOddball, feedback,
+ check_oddball, frame_state);
STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
__ Goto(&check_done);
@@ -1896,8 +1946,8 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
// In the Smi case, just convert to int32 and then float64.
// Otherwise, check heap numberness and load the number.
- Node* number =
- BuildCheckedHeapNumberOrOddballToFloat64(mode, value, frame_state);
+ Node* number = BuildCheckedHeapNumberOrOddballToFloat64(
+ mode, VectorSlotPair(), value, frame_state);
__ Goto(&done, number);
__ Bind(&if_smi);
@@ -1912,9 +1962,11 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(
Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* check = ObjectIsSmi(value);
- __ DeoptimizeIfNot(DeoptimizeReason::kNotASmi, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotASmi, params.feedback(), check,
+ frame_state);
return value;
}
@@ -1922,9 +1974,11 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(
Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(
Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* check = ObjectIsSmi(value);
- __ DeoptimizeIf(DeoptimizeReason::kSmi, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kSmi, params.feedback(), check,
+ frame_state);
return value;
}
@@ -1950,7 +2004,8 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node) {
Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
Node* node, Node* frame_state) {
- CheckTaggedInputMode mode = CheckTaggedInputModeOf(node->op());
+ const CheckTaggedInputParameters& params =
+ CheckTaggedInputParametersOf(node->op());
Node* value = node->InputAt(0);
auto if_not_smi = __ MakeLabel();
@@ -1964,8 +2019,8 @@ Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
// Otherwise, check that it's a heap number or oddball and truncate the value
// to int32.
__ Bind(&if_not_smi);
- Node* number =
- BuildCheckedHeapNumberOrOddballToFloat64(mode, value, frame_state);
+ Node* number = BuildCheckedHeapNumberOrOddballToFloat64(
+ params.mode(), params.feedback(), value, frame_state);
number = __ TruncateFloat64ToWord32(number);
__ Goto(&done, number);
@@ -1980,6 +2035,19 @@ Node* EffectControlLinearizer::LowerAllocate(Node* node) {
return new_node;
}
+Node* EffectControlLinearizer::LowerNumberToString(Node* node) {
+ Node* argument = node->InputAt(0);
+
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kNumberToString);
+ Operator::Properties properties = Operator::kEliminatable;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ return __ Call(desc, __ HeapConstant(callable.code()), argument,
+ __ NoContextConstant());
+}
+
Node* EffectControlLinearizer::LowerObjectIsArrayBufferView(Node* node) {
Node* value = node->InputAt(0);
@@ -2039,9 +2107,10 @@ Node* EffectControlLinearizer::LowerObjectIsCallable(Node* node) {
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* value_bit_field =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
- Node* vfalse = __ Word32Equal(
- __ Int32Constant(1 << Map::kIsCallable),
- __ Word32And(value_bit_field, __ Int32Constant(1 << Map::kIsCallable)));
+ Node* vfalse =
+ __ Word32Equal(__ Int32Constant(Map::IsCallableBit::kMask),
+ __ Word32And(value_bit_field,
+ __ Int32Constant(Map::IsCallableBit::kMask)));
__ Goto(&done, vfalse);
__ Bind(&if_smi);
@@ -2063,10 +2132,10 @@ Node* EffectControlLinearizer::LowerObjectIsConstructor(Node* node) {
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* value_bit_field =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
- Node* vfalse =
- __ Word32Equal(__ Int32Constant(1 << Map::kIsConstructor),
- __ Word32And(value_bit_field,
- __ Int32Constant(1 << Map::kIsConstructor)));
+ Node* vfalse = __ Word32Equal(
+ __ Int32Constant(Map::IsConstructorBit::kMask),
+ __ Word32And(value_bit_field,
+ __ Int32Constant(Map::IsConstructorBit::kMask)));
__ Goto(&done, vfalse);
__ Bind(&if_smi);
@@ -2089,10 +2158,10 @@ Node* EffectControlLinearizer::LowerObjectIsDetectableCallable(Node* node) {
Node* value_bit_field =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
Node* vfalse = __ Word32Equal(
- __ Int32Constant(1 << Map::kIsCallable),
+ __ Int32Constant(Map::IsCallableBit::kMask),
__ Word32And(value_bit_field,
- __ Int32Constant((1 << Map::kIsCallable) |
- (1 << Map::kIsUndetectable))));
+ __ Int32Constant((Map::IsCallableBit::kMask) |
+ (Map::IsUndetectableBit::kMask))));
__ Goto(&done, vfalse);
__ Bind(&if_smi);
@@ -2102,6 +2171,13 @@ Node* EffectControlLinearizer::LowerObjectIsDetectableCallable(Node* node) {
return done.PhiAt(0);
}
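The masked compare above folds two bit-field tests into one: a map describes a detectable callable exactly when the IsCallable bit is set and the IsUndetectable bit is clear. A standalone sketch of the idiom; the bit positions below are illustrative assumptions, not Map's real layout:

#include <cassert>
#include <cstdint>

// Illustrative bit positions standing in for Map::IsCallableBit::kMask and
// Map::IsUndetectableBit::kMask.
constexpr uint32_t kIsCallableMask = 1u << 0;
constexpr uint32_t kIsUndetectableMask = 1u << 1;

// One AND plus one compare answers "callable and not undetectable".
bool IsDetectableCallable(uint32_t bit_field) {
  return (bit_field & (kIsCallableMask | kIsUndetectableMask)) ==
         kIsCallableMask;
}

int main() {
  assert(IsDetectableCallable(kIsCallableMask));
  assert(!IsDetectableCallable(kIsCallableMask | kIsUndetectableMask));
  assert(!IsDetectableCallable(0));
}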
+Node* EffectControlLinearizer::LowerNumberIsFloat64Hole(Node* node) {
+ Node* value = node->InputAt(0);
+ Node* check = __ Word32Equal(__ Float64ExtractHighWord32(value),
+ __ Int32Constant(kHoleNanUpper32));
+ return check;
+}
+
Node* EffectControlLinearizer::LowerObjectIsMinusZero(Node* node) {
Node* value = node->InputAt(0);
Node* zero = __ Int32Constant(0);
@@ -2169,9 +2245,10 @@ Node* EffectControlLinearizer::LowerObjectIsNonCallable(Node* node) {
Node* value_bit_field =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
- Node* check2 = __ Word32Equal(
- __ Int32Constant(0),
- __ Word32And(value_bit_field, __ Int32Constant(1 << Map::kIsCallable)));
+ Node* check2 =
+ __ Word32Equal(__ Int32Constant(0),
+ __ Word32And(value_bit_field,
+ __ Int32Constant(Map::IsCallableBit::kMask)));
__ Goto(&done, check2);
__ Bind(&if_primitive);
@@ -2283,9 +2360,10 @@ Node* EffectControlLinearizer::LowerObjectIsUndetectable(Node* node) {
Node* value_bit_field =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
Node* vfalse = __ Word32Equal(
- __ Word32Equal(__ Int32Constant(0),
- __ Word32And(value_bit_field,
- __ Int32Constant(1 << Map::kIsUndetectable))),
+ __ Word32Equal(
+ __ Int32Constant(0),
+ __ Word32And(value_bit_field,
+ __ Int32Constant(Map::IsUndetectableBit::kMask))),
__ Int32Constant(0));
__ Goto(&done, vfalse);
@@ -2511,6 +2589,52 @@ Node* EffectControlLinearizer::LowerNewArgumentsElements(Node* node) {
__ SmiConstant(mapped_count), __ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerNewConsString(Node* node) {
+ Node* length = node->InputAt(0);
+ Node* first = node->InputAt(1);
+ Node* second = node->InputAt(2);
+
+ // Determine the instance types of {first} and {second}.
+ Node* first_map = __ LoadField(AccessBuilder::ForMap(), first);
+ Node* first_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), first_map);
+ Node* second_map = __ LoadField(AccessBuilder::ForMap(), second);
+ Node* second_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), second_map);
+
+ // Determine the proper map for the resulting ConsString.
+ // If both {first} and {second} are one-byte strings, we
+ // create a new ConsOneByteString, otherwise we create a
+ // new ConsString instead.
+ auto if_onebyte = __ MakeLabel();
+ auto if_twobyte = __ MakeLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
+ STATIC_ASSERT(kOneByteStringTag != 0);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ Node* instance_type = __ Word32And(first_instance_type, second_instance_type);
+ Node* encoding =
+ __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask));
+ __ Branch(__ Word32Equal(encoding, __ Int32Constant(kTwoByteStringTag)),
+ &if_twobyte, &if_onebyte);
+ __ Bind(&if_onebyte);
+ __ Goto(&done,
+ jsgraph()->HeapConstant(factory()->cons_one_byte_string_map()));
+ __ Bind(&if_twobyte);
+ __ Goto(&done, jsgraph()->HeapConstant(factory()->cons_string_map()));
+ __ Bind(&done);
+ Node* result_map = done.PhiAt(0);
+
+ // Allocate the resulting ConsString.
+ Node* result = __ Allocate(NOT_TENURED, __ Int32Constant(ConsString::kSize));
+ __ StoreField(AccessBuilder::ForMap(), result, result_map);
+ __ StoreField(AccessBuilder::ForNameHashField(), result,
+ jsgraph()->Int32Constant(Name::kEmptyHashField));
+ __ StoreField(AccessBuilder::ForStringLength(), result, length);
+ __ StoreField(AccessBuilder::ForConsStringFirst(), result, first);
+ __ StoreField(AccessBuilder::ForConsStringSecond(), result, second);
+ return result;
+}
+
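The map selection above works because the one-byte encoding bit survives a Word32And of the two instance types only if both strings are one-byte; a single mask test then picks the cons map. A standalone sketch of that bit trick, with assumed tag values standing in for kStringEncodingMask/kOneByteStringTag/kTwoByteStringTag:

#include <cassert>
#include <cstdint>

// Assumed encoding bit: set for one-byte strings, clear for two-byte strings.
constexpr uint32_t kStringEncodingMask = 1u << 3;
constexpr uint32_t kOneByteStringTag = kStringEncodingMask;
constexpr uint32_t kTwoByteStringTag = 0;

// Both inputs are one-byte iff the encoding bit survives the AND.
bool BothOneByte(uint32_t first_instance_type, uint32_t second_instance_type) {
  uint32_t encoding =
      (first_instance_type & second_instance_type) & kStringEncodingMask;
  return encoding != kTwoByteStringTag;
}

int main() {
  assert(BothOneByte(kOneByteStringTag, kOneByteStringTag));
  assert(!BothOneByte(kOneByteStringTag, kTwoByteStringTag));
  assert(!BothOneByte(kTwoByteStringTag, kTwoByteStringTag));
}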
Node* EffectControlLinearizer::LowerArrayBufferWasNeutered(Node* node) {
Node* value = node->InputAt(0);
@@ -2538,6 +2662,15 @@ Node* EffectControlLinearizer::LowerSameValue(Node* node) {
__ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerDeadValue(Node* node) {
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ if (input->opcode() != IrOpcode::kUnreachable) {
+ Node* unreachable = __ Unreachable();
+ NodeProperties::ReplaceValueInput(node, unreachable, 0);
+ }
+ return node;
+}
+
Node* EffectControlLinearizer::LowerStringToNumber(Node* node) {
Node* string = node->InputAt(0);
@@ -2580,19 +2713,25 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
__ NoContextConstant());
}
-Node* EffectControlLinearizer::LowerSeqStringCharCodeAt(Node* node) {
+Node* EffectControlLinearizer::LowerStringCodePointAt(Node* node) {
Node* receiver = node->InputAt(0);
Node* position = node->InputAt(1);
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringCodePointAt);
+ Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties,
+ MachineType::TaggedSigned());
+ return __ Call(desc, __ HeapConstant(callable.code()), receiver, position,
+ __ NoContextConstant());
+}
+
+Node* EffectControlLinearizer::LoadFromString(Node* receiver, Node* position,
+ Node* is_one_byte) {
auto one_byte_load = __ MakeLabel();
auto done = __ MakeLabel(MachineRepresentation::kWord32);
-
- Node* map = __ LoadField(AccessBuilder::ForMap(), receiver);
- Node* instance_type = __ LoadField(AccessBuilder::ForMapInstanceType(), map);
- Node* is_one_byte = __ Word32Equal(
- __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask)),
- __ Int32Constant(kOneByteStringTag));
-
__ GotoIf(is_one_byte, &one_byte_load);
Node* two_byte_result = __ LoadElement(
AccessBuilder::ForSeqTwoByteStringCharacter(), receiver, position);
@@ -2607,6 +2746,85 @@ Node* EffectControlLinearizer::LowerSeqStringCharCodeAt(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerSeqStringCharCodeAt(Node* node) {
+ Node* receiver = node->InputAt(0);
+ Node* position = node->InputAt(1);
+
+ Node* map = __ LoadField(AccessBuilder::ForMap(), receiver);
+ Node* instance_type = __ LoadField(AccessBuilder::ForMapInstanceType(), map);
+ Node* is_one_byte = __ Word32Equal(
+ __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask)),
+ __ Int32Constant(kOneByteStringTag));
+
+ return LoadFromString(receiver, position, is_one_byte);
+}
+
+Node* EffectControlLinearizer::LowerSeqStringCodePointAt(
+ Node* node, UnicodeEncoding encoding) {
+ Node* receiver = node->InputAt(0);
+ Node* position = node->InputAt(1);
+
+ Node* map = __ LoadField(AccessBuilder::ForMap(), receiver);
+ Node* instance_type = __ LoadField(AccessBuilder::ForMapInstanceType(), map);
+ Node* is_one_byte = __ Word32Equal(
+ __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask)),
+ __ Int32Constant(kOneByteStringTag));
+
+ Node* first_char_code = LoadFromString(receiver, position, is_one_byte);
+
+ auto return_result = __ MakeLabel(MachineRepresentation::kWord32);
+
+ // Check if first character code is outside of interval [0xD800, 0xDBFF].
+ Node* first_out =
+ __ Word32Equal(__ Word32And(first_char_code, __ Int32Constant(0xFC00)),
+ __ Int32Constant(0xD800));
+ // Return first character code.
+ __ GotoIf(first_out, &return_result, first_char_code);
+ // Check if position + 1 is still in range.
+ Node* length = __ LoadField(AccessBuilder::ForStringLength(), receiver);
+ Node* next_position = __ Int32Add(position, __ Int32Constant(1));
+ Node* next_position_in_range = __ Int32LessThan(next_position, length);
+ __ GotoIf(next_position_in_range, &return_result, first_char_code);
+
+ // Load second character code.
+ Node* second_char_code = LoadFromString(receiver, next_position, is_one_byte);
+  // Check if second character code is inside of interval [0xDC00, 0xDFFF].
+ Node* second_out =
+ __ Word32Equal(__ Word32And(second_char_code, __ Int32Constant(0xFC00)),
+ __ Int32Constant(0xDC00));
+ __ GotoIfNot(second_out, &return_result, first_char_code);
+
+ Node* result;
+ switch (encoding) {
+ case UnicodeEncoding::UTF16:
+ result = __ Word32Or(
+// Need to swap the order for big-endian platforms
+#if V8_TARGET_BIG_ENDIAN
+ __ Word32Shl(first_char_code, __ Int32Constant(16)),
+ second_char_code);
+#else
+ __ Word32Shl(second_char_code, __ Int32Constant(16)),
+ first_char_code);
+#endif
+ break;
+ case UnicodeEncoding::UTF32: {
+ // Convert UTF16 surrogate pair into |word32| code point, encoded as
+ // UTF32.
+ Node* surrogate_offset =
+ __ Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
+
+ // (lead << 10) + trail + SURROGATE_OFFSET
+ result = __ Int32Add(__ Word32Shl(first_char_code, __ Int32Constant(10)),
+ __ Int32Add(second_char_code, surrogate_offset));
+ break;
+ }
+ }
+ __ Goto(&return_result, result);
+
+ __ Bind(&return_result);
+ return return_result.PhiAt(0);
+}
+
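The UTF32 branch above folds the usual surrogate-pair formula ((lead - 0xD800) << 10) + (trail - 0xDC00) + 0x10000 into a single precomputed surrogate_offset, so the lowering needs only one shift and two adds. A quick standalone check of that arithmetic (editorial example, not part of the patch):

#include <cassert>
#include <cstdint>

constexpr int32_t kSurrogateOffset = 0x10000 - (0xD800 << 10) - 0xDC00;

// (lead << 10) + trail + kSurrogateOffset == full code point.
int32_t CombineSurrogatePair(int32_t lead, int32_t trail) {
  return (lead << 10) + trail + kSurrogateOffset;
}

int main() {
  // U+1F600 is encoded in UTF-16 as the pair 0xD83D 0xDE00.
  assert(CombineSurrogatePair(0xD83D, 0xDE00) == 0x1F600);
  // U+10000, the first supplementary code point, is 0xD800 0xDC00.
  assert(CombineSurrogatePair(0xD800, 0xDC00) == 0x10000);
}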
Node* EffectControlLinearizer::LowerStringFromCharCode(Node* node) {
Node* value = node->InputAt(0);
@@ -2836,6 +3054,12 @@ Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
position, __ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerStringLength(Node* node) {
+ Node* subject = node->InputAt(0);
+
+ return __ LoadField(AccessBuilder::ForStringLength(), subject);
+}
+
Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable,
Node* node) {
Node* lhs = node->InputAt(0);
@@ -2872,7 +3096,8 @@ Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
Node* value = node->InputAt(0);
Node* check = __ Word32Equal(__ Float64ExtractHighWord32(value),
__ Int32Constant(kHoleNanUpper32));
- __ DeoptimizeIf(DeoptimizeReason::kHole, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kHole, VectorSlotPair(), check,
+ frame_state);
return value;
}
@@ -2881,7 +3106,8 @@ Node* EffectControlLinearizer::LowerCheckNotTaggedHole(Node* node,
Node* frame_state) {
Node* value = node->InputAt(0);
Node* check = __ WordEqual(value, __ TheHoleConstant());
- __ DeoptimizeIf(DeoptimizeReason::kHole, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kHole, VectorSlotPair(), check,
+ frame_state);
return value;
}
@@ -2918,8 +3144,8 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
__ Bind(&if_notsame);
{
// Now {val} could still be a non-internalized String that matches {exp}.
- __ DeoptimizeIf(DeoptimizeReason::kWrongName, ObjectIsSmi(val),
- frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kWrongName, VectorSlotPair(),
+ ObjectIsSmi(val), frame_state);
Node* val_map = __ LoadField(AccessBuilder::ForMap(), val);
Node* val_instance_type =
__ LoadField(AccessBuilder::ForMapInstanceType(), val_map);
@@ -2937,7 +3163,7 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
// Check that the {val} is a non-internalized String, if it's anything
// else it cannot match the recorded feedback {exp} anyways.
__ DeoptimizeIfNot(
- DeoptimizeReason::kWrongName,
+ DeoptimizeReason::kWrongName, VectorSlotPair(),
__ Word32Equal(__ Word32And(val_instance_type,
__ Int32Constant(kIsNotStringMask |
kIsNotInternalizedMask)),
@@ -2956,7 +3182,7 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
__ Call(common()->Call(desc), try_internalize_string_function, val);
// Now see if the results match.
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongName,
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, VectorSlotPair(),
__ WordEqual(exp, val_internalized), frame_state);
__ Goto(&if_same);
}
@@ -2966,7 +3192,7 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
// The {val} is a ThinString, let's check the actual value.
Node* val_actual =
__ LoadField(AccessBuilder::ForThinStringActual(), val);
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongName,
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, VectorSlotPair(),
__ WordEqual(exp, val_actual), frame_state);
__ Goto(&if_same);
}
@@ -2980,7 +3206,8 @@ void EffectControlLinearizer::LowerCheckEqualsSymbol(Node* node,
Node* exp = node->InputAt(0);
Node* val = node->InputAt(1);
Node* check = __ WordEqual(exp, val);
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, VectorSlotPair(), check,
+ frame_state);
}
Node* EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value) {
@@ -3135,7 +3362,7 @@ Node* EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node) {
Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
Node* frame_state) {
- GrowFastElementsMode mode = GrowFastElementsModeOf(node->op());
+ GrowFastElementsParameters params = GrowFastElementsParametersOf(node->op());
Node* object = node->InputAt(0);
Node* elements = node->InputAt(1);
Node* index = node->InputAt(2);
@@ -3154,7 +3381,7 @@ Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
// We need to grow the {elements} for {object}.
Operator::Properties properties = Operator::kEliminatable;
Callable callable =
- (mode == GrowFastElementsMode::kDoubleElements)
+ (params.mode() == GrowFastElementsMode::kDoubleElements)
? Builtins::CallableFor(isolate(), Builtins::kGrowFastDoubleElements)
: Builtins::CallableFor(isolate(),
Builtins::kGrowFastSmiOrObjectElements);
@@ -3166,10 +3393,8 @@ Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
ChangeInt32ToSmi(index), __ NoContextConstant());
// Ensure that we were able to grow the {elements}.
- // TODO(turbofan): We use kSmi as reason here similar to Crankshaft,
- // but maybe we should just introduce a reason that makes sense.
- __ DeoptimizeIf(DeoptimizeReason::kSmi, ObjectIsSmi(new_elements),
- frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kCouldNotGrowElements, params.feedback(),
+ ObjectIsSmi(new_elements), frame_state);
__ Goto(&done, new_elements);
__ Bind(&done);
@@ -3723,12 +3948,13 @@ void EffectControlLinearizer::LowerStoreSignedSmallElement(Node* node) {
}
void EffectControlLinearizer::LowerRuntimeAbort(Node* node) {
- BailoutReason reason = BailoutReasonOf(node->op());
+ AbortReason reason = AbortReasonOf(node->op());
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kAbort;
CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
- __ Call(desc, __ CEntryStubConstant(1), jsgraph()->SmiConstant(reason),
+ __ Call(desc, __ CEntryStubConstant(1),
+ jsgraph()->SmiConstant(static_cast<int>(reason)),
__ ExternalConstant(ExternalReference(id, isolate())),
__ Int32Constant(1), __ NoContextConstant());
}
@@ -4165,14 +4391,14 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntry(Node* node) {
Node* EffectControlLinearizer::ComputeIntegerHash(Node* value) {
// See v8::internal::ComputeIntegerHash()
- value = __ Int32Add(__ Word32Xor(value, __ Int32Constant(0xffffffff)),
+ value = __ Int32Add(__ Word32Xor(value, __ Int32Constant(0xFFFFFFFF)),
__ Word32Shl(value, __ Int32Constant(15)));
value = __ Word32Xor(value, __ Word32Shr(value, __ Int32Constant(12)));
value = __ Int32Add(value, __ Word32Shl(value, __ Int32Constant(2)));
value = __ Word32Xor(value, __ Word32Shr(value, __ Int32Constant(4)));
value = __ Int32Mul(value, __ Int32Constant(2057));
value = __ Word32Xor(value, __ Word32Shr(value, __ Int32Constant(16)));
- value = __ Word32And(value, __ Int32Constant(0x3fffffff));
+ value = __ Word32And(value, __ Int32Constant(0x3FFFFFFF));
return value;
}
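The node sequence above is the integer bit-mixing hash referenced by the comment (v8::internal::ComputeIntegerHash); transcribed into plain C++ it is easier to follow (reference sketch only, not part of the patch):

#include <cstdint>
#include <cstdio>

// Plain transcription of the graph built in ComputeIntegerHash above.
uint32_t ComputeIntegerHash(uint32_t value) {
  value = (value ^ 0xFFFFFFFFu) + (value << 15);  // ~value + (value << 15)
  value = value ^ (value >> 12);
  value = value + (value << 2);
  value = value ^ (value >> 4);
  value = value * 2057u;
  value = value ^ (value >> 16);
  return value & 0x3FFFFFFFu;  // keep the low 30 bits
}

int main() {
  std::printf("%u\n", ComputeIntegerHash(42));  // deterministic bit mixing
}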
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index 7cf6910386..47b1586d6d 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -90,6 +90,7 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerTruncateTaggedToWord32(Node* node);
Node* LowerCheckedTruncateTaggedToWord32(Node* node, Node* frame_state);
Node* LowerAllocate(Node* node);
+ Node* LowerNumberToString(Node* node);
Node* LowerObjectIsArrayBufferView(Node* node);
Node* LowerObjectIsBigInt(Node* node);
Node* LowerObjectIsCallable(Node* node);
@@ -104,22 +105,28 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerObjectIsString(Node* node);
Node* LowerObjectIsSymbol(Node* node);
Node* LowerObjectIsUndetectable(Node* node);
+ Node* LowerNumberIsFloat64Hole(Node* node);
Node* LowerArgumentsFrame(Node* node);
Node* LowerArgumentsLength(Node* node);
Node* LowerNewDoubleElements(Node* node);
Node* LowerNewSmiOrObjectElements(Node* node);
Node* LowerNewArgumentsElements(Node* node);
+ Node* LowerNewConsString(Node* node);
Node* LowerArrayBufferWasNeutered(Node* node);
Node* LowerSameValue(Node* node);
+ Node* LowerDeadValue(Node* node);
Node* LowerStringToNumber(Node* node);
Node* LowerStringCharAt(Node* node);
Node* LowerStringCharCodeAt(Node* node);
Node* LowerSeqStringCharCodeAt(Node* node);
+ Node* LowerStringCodePointAt(Node* node);
+ Node* LowerSeqStringCodePointAt(Node* node, UnicodeEncoding encoding);
Node* LowerStringToLowerCaseIntl(Node* node);
Node* LowerStringToUpperCaseIntl(Node* node);
Node* LowerStringFromCharCode(Node* node);
Node* LowerStringFromCodePoint(Node* node);
Node* LowerStringIndexOf(Node* node);
+ Node* LowerStringLength(Node* node);
Node* LowerStringEqual(Node* node);
Node* LowerStringLessThan(Node* node);
Node* LowerStringLessThanOrEqual(Node* node);
@@ -156,9 +163,11 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Maybe<Node*> LowerFloat64RoundTruncate(Node* node);
Node* AllocateHeapNumberWithValue(Node* node);
- Node* BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode, Node* value,
+ Node* BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
+ const VectorSlotPair& feedback, Node* value,
Node* frame_state);
Node* BuildCheckedHeapNumberOrOddballToFloat64(CheckTaggedInputMode mode,
+ const VectorSlotPair& feedback,
Node* value,
Node* frame_state);
Node* BuildFloat64RoundDown(Node* value);
@@ -173,6 +182,7 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* ChangeSmiToIntPtr(Node* value);
Node* ChangeSmiToInt32(Node* value);
Node* ObjectIsSmi(Node* value);
+ Node* LoadFromString(Node* receiver, Node* position, Node* is_one_byte);
Node* SmiMaxValueConstant();
Node* SmiShiftBitsConstant();
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index aa2a1b2f3a..16a9d78faf 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -33,18 +33,39 @@ EscapeAnalysisReducer::EscapeAnalysisReducer(
arguments_elements_(zone),
zone_(zone) {}
-Node* EscapeAnalysisReducer::MaybeGuard(Node* original, Node* replacement) {
- // We might need to guard the replacement if the type of the {replacement}
- // node is not in a sub-type relation to the type of the the {original} node.
+Reduction EscapeAnalysisReducer::ReplaceNode(Node* original,
+ Node* replacement) {
+ const VirtualObject* vobject =
+ analysis_result().GetVirtualObject(replacement);
+ if (replacement->opcode() == IrOpcode::kDead ||
+ (vobject && !vobject->HasEscaped())) {
+ RelaxEffectsAndControls(original);
+ return Replace(replacement);
+ }
Type* const replacement_type = NodeProperties::GetType(replacement);
Type* const original_type = NodeProperties::GetType(original);
- if (!replacement_type->Is(original_type)) {
- Node* const control = NodeProperties::GetControlInput(original);
- replacement = jsgraph()->graph()->NewNode(
- jsgraph()->common()->TypeGuard(original_type), replacement, control);
- NodeProperties::SetType(replacement, original_type);
+ if (replacement_type->Is(original_type)) {
+ RelaxEffectsAndControls(original);
+ return Replace(replacement);
}
- return replacement;
+
+ // We need to guard the replacement if we would widen the type otherwise.
+ DCHECK_EQ(1, original->op()->EffectOutputCount());
+ DCHECK_EQ(1, original->op()->EffectInputCount());
+ DCHECK_EQ(1, original->op()->ControlInputCount());
+ Node* effect = NodeProperties::GetEffectInput(original);
+ Node* control = NodeProperties::GetControlInput(original);
+ original->TrimInputCount(0);
+ original->AppendInput(jsgraph()->zone(), replacement);
+ original->AppendInput(jsgraph()->zone(), effect);
+ original->AppendInput(jsgraph()->zone(), control);
+ NodeProperties::SetType(
+ original,
+ Type::Intersect(original_type, replacement_type, jsgraph()->zone()));
+ NodeProperties::ChangeOp(original,
+ jsgraph()->common()->TypeGuard(original_type));
+ ReplaceWithValue(original, original, original, control);
+ return NoChange();
}
namespace {
@@ -74,11 +95,7 @@ Reduction EscapeAnalysisReducer::Reduce(Node* node) {
DCHECK(node->opcode() != IrOpcode::kAllocate &&
node->opcode() != IrOpcode::kFinishRegion);
DCHECK_NE(replacement, node);
- if (replacement != jsgraph()->Dead()) {
- replacement = MaybeGuard(node, replacement);
- }
- RelaxEffectsAndControls(node);
- return Replace(replacement);
+ return ReplaceNode(node, replacement);
}
switch (node->opcode()) {
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.h b/deps/v8/src/compiler/escape-analysis-reducer.h
index b89d4d03e8..29290d3a0a 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.h
+++ b/deps/v8/src/compiler/escape-analysis-reducer.h
@@ -97,7 +97,7 @@ class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
void ReduceFrameStateInputs(Node* node);
Node* ReduceDeoptState(Node* node, Node* effect, Deduplicator* deduplicator);
Node* ObjectIdNode(const VirtualObject* vobject);
- Node* MaybeGuard(Node* original, Node* replacement);
+ Reduction ReplaceNode(Node* original, Node* replacement);
JSGraph* jsgraph() const { return jsgraph_; }
EscapeAnalysisResult analysis_result() const { return analysis_result_; }
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index b3b1abb6df..4b773136a9 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -223,8 +223,12 @@ class EscapeAnalysisTracker : public ZoneObject {
replacement_ = replacement;
vobject_ =
replacement ? tracker_->virtual_objects_.Get(replacement) : nullptr;
- TRACE("Set %s#%d as replacement.\n", replacement->op()->mnemonic(),
- replacement->id());
+ if (replacement) {
+ TRACE("Set %s#%d as replacement.\n", replacement->op()->mnemonic(),
+ replacement->id());
+ } else {
+ TRACE("Set nullptr as replacement.\n");
+ }
}
void MarkForDeletion() { SetReplacement(tracker_->jsgraph_->Dead()); }
@@ -248,10 +252,6 @@ class EscapeAnalysisTracker : public ZoneObject {
Node* GetReplacementOf(Node* node) { return replacements_[node]; }
Node* ResolveReplacement(Node* node) {
if (Node* replacement = GetReplacementOf(node)) {
- // Replacements cannot have replacements. This is important to ensure
- // re-visitation: If a replacement is replaced, then all nodes accessing
- // the replacement have to be updated.
- DCHECK_NULL(GetReplacementOf(replacement));
return replacement;
}
return node;
@@ -768,7 +768,12 @@ EscapeAnalysis::EscapeAnalysis(JSGraph* jsgraph, Zone* zone)
jsgraph_(jsgraph) {}
Node* EscapeAnalysisResult::GetReplacementOf(Node* node) {
- return tracker_->GetReplacementOf(node);
+ Node* replacement = tracker_->GetReplacementOf(node);
+ // Replacements cannot have replacements. This is important to ensure
+ // re-visitation: If a replacement is replaced, then all nodes accessing
+ // the replacement have to be updated.
+ if (replacement) DCHECK_NULL(tracker_->GetReplacementOf(replacement));
+ return replacement;
}
Node* EscapeAnalysisResult::GetVirtualObjectField(const VirtualObject* vobject,
diff --git a/deps/v8/src/compiler/frame.cc b/deps/v8/src/compiler/frame.cc
index e0284c8ab4..0b6d7ac193 100644
--- a/deps/v8/src/compiler/frame.cc
+++ b/deps/v8/src/compiler/frame.cc
@@ -13,13 +13,22 @@ namespace internal {
namespace compiler {
Frame::Frame(int fixed_frame_size_in_slots)
- : frame_slot_count_(fixed_frame_size_in_slots),
+ : fixed_slot_count_(fixed_frame_size_in_slots),
+ frame_slot_count_(fixed_frame_size_in_slots),
spill_slot_count_(0),
+ return_slot_count_(0),
allocated_registers_(nullptr),
allocated_double_registers_(nullptr) {}
int Frame::AlignFrame(int alignment) {
int alignment_slots = alignment / kPointerSize;
+ // We have to align return slots separately, because they are claimed
+ // separately on the stack.
+ int return_delta =
+ alignment_slots - (return_slot_count_ & (alignment_slots - 1));
+ if (return_delta != alignment_slots) {
+ frame_slot_count_ += return_delta;
+ }
int delta = alignment_slots - (frame_slot_count_ & (alignment_slots - 1));
if (delta != alignment_slots) {
frame_slot_count_ += delta;
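The rounding step above adds the distance to the next multiple of alignment_slots, and adds nothing when the count is already aligned (delta then equals alignment_slots). A quick standalone check of that arithmetic, assuming alignment_slots is a power of two as it is for pointer-sized slots:

#include <cassert>

// Same rounding as Frame::AlignFrame: pad up to the next aligned slot count.
int AlignSlotCount(int slot_count, int alignment_slots) {
  int delta = alignment_slots - (slot_count & (alignment_slots - 1));
  if (delta != alignment_slots) slot_count += delta;
  return slot_count;
}

int main() {
  // kDoubleSize / kPointerSize == 2 on a 32-bit target.
  assert(AlignSlotCount(5, 2) == 6);  // odd count gains one padding slot
  assert(AlignSlotCount(6, 2) == 6);  // already aligned, unchanged
  assert(AlignSlotCount(7, 4) == 8);
}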
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index fe8008913d..f5c36dba17 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -22,7 +22,7 @@ class CallDescriptor;
// into them. Mutable state associated with the frame is stored separately in
// FrameAccessState.
//
-// Frames are divided up into three regions.
+// Frames are divided up into four regions.
// - The first is the fixed header, which always has a constant size and can be
// predicted before code generation begins depending on the type of code being
// generated.
@@ -33,11 +33,15 @@ class CallDescriptor;
// reserved after register allocation, since its size can only be precisely
// determined after register allocation once the number of used callee-saved
// register is certain.
+// - The fourth region is a scratch area for return values from other functions
+// called, if multiple returns cannot all be passed in registers. This region
+// must be last in a stack frame, so that it is positioned immediately below
+// the stack frame of a callee that stores to it.
//
// The frame region immediately below the fixed header contains spill slots
// starting at slot 4 for JSFunctions. The callee-saved frame region below that
-// starts at 4+spill_slot_count_. Callee stack slots corresponding to
-// parameters are accessible through negative slot ids.
+// starts at 4+spill_slot_count_. Callee stack slots correspond to
+// parameters that are accessible through negative slot ids.
//
// Every slot of a caller or callee frame is accessible by the register
// allocator and gap resolver with a SpillSlotOperand containing its
@@ -73,7 +77,13 @@ class CallDescriptor;
// |- - - - - - - - -| | |
// | ... | Callee-saved |
// |- - - - - - - - -| | |
-// m+r+3 | callee-saved r | v v
+// m+r+3 | callee-saved r | v |
+// +-----------------+---- |
+// m+r+4 | return 0 | ^ |
+// |- - - - - - - - -| | |
+// | ... | Return |
+// |- - - - - - - - -| | |
+// | return q-1 | v v
// -----+-----------------+----- <-- stack ptr -------------
//
class Frame : public ZoneObject {
@@ -81,8 +91,9 @@ class Frame : public ZoneObject {
explicit Frame(int fixed_frame_size_in_slots);
inline int GetTotalFrameSlotCount() const { return frame_slot_count_; }
-
+ inline int GetFixedSlotCount() const { return fixed_slot_count_; }
inline int GetSpillSlotCount() const { return spill_slot_count_; }
+ inline int GetReturnSlotCount() const { return return_slot_count_; }
void SetAllocatedRegisters(BitVector* regs) {
DCHECK_NULL(allocated_registers_);
@@ -112,19 +123,25 @@ class Frame : public ZoneObject {
}
int AllocateSpillSlot(int width, int alignment = 0) {
+ DCHECK_EQ(frame_slot_count_,
+ fixed_slot_count_ + spill_slot_count_ + return_slot_count_);
int frame_slot_count_before = frame_slot_count_;
- if (alignment <= kPointerSize) {
- AllocateAlignedFrameSlots(width);
- } else {
- // We need to allocate more place for spill slot
- // in case we need an aligned spill slot to be
- // able to properly align start of spill slot
- // and still have enough place to hold all the
- // data
- AllocateAlignedFrameSlots(width + alignment - kPointerSize);
+ if (alignment > kPointerSize) {
+ // Slots are pointer sized, so alignment greater than a pointer size
+ // requires allocating additional slots.
+ width += alignment - kPointerSize;
}
+ AllocateAlignedFrameSlots(width);
spill_slot_count_ += frame_slot_count_ - frame_slot_count_before;
- return frame_slot_count_ - 1;
+ return frame_slot_count_ - return_slot_count_ - 1;
+ }
+
+ void EnsureReturnSlots(int count) {
+ if (count > return_slot_count_) {
+ count -= return_slot_count_;
+ frame_slot_count_ += count;
+ return_slot_count_ += count;
+ }
}
int AlignFrame(int alignment = kDoubleSize);
@@ -152,8 +169,10 @@ class Frame : public ZoneObject {
}
private:
+ int fixed_slot_count_;
int frame_slot_count_;
int spill_slot_count_;
+ int return_slot_count_;
BitVector* allocated_registers_;
BitVector* allocated_double_registers_;
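The new accounting keeps the invariant frame_slot_count_ == fixed_slot_count_ + spill_slot_count_ + return_slot_count_, and AllocateSpillSlot reports indices relative to the end of the spill area so spill slots never land in the trailing return region grown by EnsureReturnSlots. A toy model of that bookkeeping (illustrative only; the real AllocateSpillSlot also handles width and alignment):

#include <cassert>

struct FrameModel {
  int fixed_slots;
  int spill_slots = 0;
  int return_slots = 0;

  int TotalSlots() const { return fixed_slots + spill_slots + return_slots; }

  // Simplified AllocateSpillSlot: one pointer-sized slot at a time.
  int AllocateSpillSlot() {
    ++spill_slots;
    return TotalSlots() - return_slots - 1;
  }

  void EnsureReturnSlots(int count) {
    if (count > return_slots) return_slots = count;
  }
};

int main() {
  FrameModel frame{4};                     // fixed header of 4 slots
  frame.EnsureReturnSlots(2);              // room for 2 stack-passed returns
  assert(frame.AllocateSpillSlot() == 4);  // slots 0-3 are the fixed header
  assert(frame.AllocateSpillSlot() == 5);
  assert(frame.TotalSlots() == 8);         // 4 fixed + 2 spill + 2 return
}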
diff --git a/deps/v8/src/compiler/gap-resolver.cc b/deps/v8/src/compiler/gap-resolver.cc
index 3dc1ee27c9..4542a73685 100644
--- a/deps/v8/src/compiler/gap-resolver.cc
+++ b/deps/v8/src/compiler/gap-resolver.cc
@@ -5,7 +5,6 @@
#include "src/compiler/gap-resolver.h"
#include <algorithm>
-#include <functional>
#include <set>
namespace v8 {
@@ -19,10 +18,6 @@ namespace {
const int kFloat32Bit = REP_BIT(MachineRepresentation::kFloat32);
const int kFloat64Bit = REP_BIT(MachineRepresentation::kFloat64);
-inline bool Blocks(MoveOperands* move, InstructionOperand destination) {
- return !move->IsEliminated() && move->source().InterferesWith(destination);
-}
-
// Splits a FP move between two location operands into the equivalent series of
// moves between smaller sub-operands, e.g. a double move to two single moves.
// This helps reduce the number of cycles that would normally occur under FP
@@ -53,7 +48,7 @@ MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep,
src_index = src_loc.register_code() * aliases;
} else {
src_index = src_loc.index();
- // For operands that occuply multiple slots, the index refers to the last
+ // For operands that occupy multiple slots, the index refers to the last
// slot. On little-endian architectures, we start at the high slot and use a
// negative step so that register-to-slot moves are in the correct order.
src_step = -slot_size;
@@ -197,8 +192,11 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) {
// The move may be blocked on a (at most one) pending move, in which case we
// have a cycle. Search for such a blocking move and perform a swap to
// resolve it.
- auto blocker = std::find_if(moves->begin(), moves->end(),
- std::bind2nd(std::ptr_fun(&Blocks), destination));
+ auto blocker =
+ std::find_if(moves->begin(), moves->end(), [&](MoveOperands* move) {
+ return !move->IsEliminated() &&
+ move->source().InterferesWith(destination);
+ });
if (blocker == moves->end()) {
// The easy case: This move is not blocked.
assembler_->AssembleMove(&source, &destination);
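The lambda above is a drop-in replacement for std::bind2nd(std::ptr_fun(&Blocks), destination); those binder helpers were deprecated in C++11 and removed in C++17, while a capturing lambda expresses the same predicate directly. A minimal standalone illustration of the pattern (plain ints stand in for MoveOperands):

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  std::vector<int> moves = {1, 4, 9, 16};
  int destination = 9;
  // Capture the destination by reference, as the new find_if call does.
  auto blocker = std::find_if(moves.begin(), moves.end(),
                              [&](int move) { return move == destination; });
  assert(blocker != moves.end() && *blocker == 9);
}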
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 50001976a9..a0b2e0ff0a 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -134,6 +134,11 @@ Node* GraphAssembler::DebugBreak() {
current_effect_, current_control_);
}
+Node* GraphAssembler::Unreachable() {
+ return current_effect_ = graph()->NewNode(common()->Unreachable(),
+ current_effect_, current_control_);
+}
+
Node* GraphAssembler::Store(StoreRepresentation rep, Node* object, Node* offset,
Node* value) {
return current_effect_ =
@@ -164,24 +169,33 @@ Node* GraphAssembler::ToNumber(Node* value) {
value, NoContextConstant(), current_effect_);
}
-Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason, Node* condition,
- Node* frame_state) {
+Node* GraphAssembler::BitcastWordToTagged(Node* value) {
+ return current_effect_ =
+ graph()->NewNode(machine()->BitcastWordToTagged(), value,
+ current_effect_, current_control_);
+}
+
+Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ Node* condition, Node* frame_state) {
return current_control_ = current_effect_ = graph()->NewNode(
- common()->DeoptimizeIf(DeoptimizeKind::kEager, reason), condition,
- frame_state, current_effect_, current_control_);
+ common()->DeoptimizeIf(DeoptimizeKind::kEager, reason, feedback),
+ condition, frame_state, current_effect_, current_control_);
}
Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeKind kind,
- DeoptimizeReason reason, Node* condition,
- Node* frame_state) {
+ DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ Node* condition, Node* frame_state) {
return current_control_ = current_effect_ = graph()->NewNode(
- common()->DeoptimizeUnless(kind, reason), condition, frame_state,
- current_effect_, current_control_);
+ common()->DeoptimizeUnless(kind, reason, feedback), condition,
+ frame_state, current_effect_, current_control_);
}
-Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeReason reason, Node* condition,
- Node* frame_state) {
- return DeoptimizeIfNot(DeoptimizeKind::kEager, reason, condition,
+Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ Node* condition, Node* frame_state) {
+ return DeoptimizeIfNot(DeoptimizeKind::kEager, reason, feedback, condition,
frame_state);
}
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 3d3c2ed103..9ae74d0df5 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -8,6 +8,7 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
+#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -28,8 +29,7 @@ namespace compiler {
V(RoundFloat64ToInt32) \
V(TruncateFloat64ToWord32) \
V(Float64ExtractHighWord32) \
- V(Float64Abs) \
- V(BitcastWordToTagged)
+ V(Float64Abs)
#define PURE_ASSEMBLER_MACH_BINOP_LIST(V) \
V(WordShl) \
@@ -193,9 +193,12 @@ class GraphAssembler {
// Debugging
Node* DebugBreak();
+ Node* Unreachable();
+
Node* Float64RoundDown(Node* value);
Node* ToNumber(Node* value);
+ Node* BitcastWordToTagged(Node* value);
Node* Allocate(PretenureFlag pretenure, Node* size);
Node* LoadField(FieldAccess const&, Node* object);
Node* LoadElement(ElementAccess const&, Node* object, Node* index);
@@ -209,12 +212,13 @@ class GraphAssembler {
Node* Retain(Node* buffer);
Node* UnsafePointerAdd(Node* base, Node* external);
- Node* DeoptimizeIf(DeoptimizeReason reason, Node* condition,
- Node* frame_state);
+ Node* DeoptimizeIf(DeoptimizeReason reason, VectorSlotPair const& feedback,
+ Node* condition, Node* frame_state);
Node* DeoptimizeIfNot(DeoptimizeKind kind, DeoptimizeReason reason,
- Node* condition, Node* frame_state);
- Node* DeoptimizeIfNot(DeoptimizeReason reason, Node* condition,
+ VectorSlotPair const& feedback, Node* condition,
Node* frame_state);
+ Node* DeoptimizeIfNot(DeoptimizeReason reason, VectorSlotPair const& feedback,
+ Node* condition, Node* frame_state);
template <typename... Args>
Node* Call(const CallDescriptor* desc, Args... args);
template <typename... Args>
diff --git a/deps/v8/src/compiler/graph-trimmer.h b/deps/v8/src/compiler/graph-trimmer.h
index e57dc18b5e..edabae0b8a 100644
--- a/deps/v8/src/compiler/graph-trimmer.h
+++ b/deps/v8/src/compiler/graph-trimmer.h
@@ -15,7 +15,6 @@ namespace compiler {
// Forward declarations.
class Graph;
-
// Trims dead nodes from the node graph.
class V8_EXPORT_PRIVATE GraphTrimmer final {
public:
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 8e9505bae1..47ded6a30c 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -175,17 +175,6 @@ bool HasImmediateInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsImmediate();
}
-class OutOfLineLoadZero final : public OutOfLineCode {
- public:
- OutOfLineLoadZero(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ xor_(result_, result_); }
-
- private:
- Register const result_;
-};
-
class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
public:
OutOfLineLoadFloat32NaN(CodeGenerator* gen, XMMRegister result)
@@ -298,425 +287,6 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} // namespace
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN, \
- SingleOrDouble) \
- do { \
- auto result = i.OutputDoubleRegister(); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- OutOfLineCode* ool = new (zone()) OutOfLineLoadNaN(this, result); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, i.MemoryOperand(2)); \
- __ bind(ool->exit()); \
- } else { \
- auto index2 = i.InputInt32(0); \
- auto length = i.InputInt32(1); \
- auto index1 = i.InputRegister(2); \
- RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
- RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(3)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2), \
- rmode_length)); \
- class OutOfLineLoadFloat final : public OutOfLineCode { \
- public: \
- OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
- Register buffer, Register index1, int32_t index2, \
- int32_t length, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- result_(result), \
- buffer_reg_(buffer), \
- buffer_int_(0), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
- int32_t buffer, Register index1, int32_t index2, \
- int32_t length, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- result_(result), \
- buffer_reg_(no_reg), \
- buffer_int_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- void Generate() final { \
- Label oob; \
- __ push(index1_); \
- __ lea(index1_, Operand(index1_, index2_)); \
- __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_), \
- rmode_length_)); \
- __ j(above_equal, &oob, Label::kNear); \
- if (buffer_reg_.is_valid()) { \
- __ asm_instr(result_, Operand(buffer_reg_, index1_, times_1, 0)); \
- } else { \
- __ asm_instr(result_, \
- Operand(index1_, buffer_int_, rmode_buffer_)); \
- } \
- __ pop(index1_); \
- __ jmp(exit()); \
- __ bind(&oob); \
- __ pop(index1_); \
- __ xorp##SingleOrDouble(result_, result_); \
- __ divs##SingleOrDouble(result_, result_); \
- } \
- \
- private: \
- XMMRegister const result_; \
- Register const buffer_reg_; \
- int32_t const buffer_int_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- RelocInfo::Mode rmode_length_; \
- RelocInfo::Mode rmode_buffer_; \
- }; \
- if (instr->InputAt(3)->IsRegister()) { \
- auto buffer = i.InputRegister(3); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineLoadFloat(this, result, buffer, index1, index2, length, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
- __ bind(ool->exit()); \
- } else { \
- auto buffer = i.InputInt32(3); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineLoadFloat(this, result, buffer, index1, index2, length, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, Operand(index1, buffer + index2, rmode_buffer)); \
- __ bind(ool->exit()); \
- } \
- } \
- } while (false)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- OutOfLineCode* ool = new (zone()) OutOfLineLoadZero(this, result); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, i.MemoryOperand(2)); \
- __ bind(ool->exit()); \
- } else { \
- auto index2 = i.InputInt32(0); \
- auto length = i.InputInt32(1); \
- auto index1 = i.InputRegister(2); \
- RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
- RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(3)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2), \
- rmode_length)); \
- class OutOfLineLoadInteger final : public OutOfLineCode { \
- public: \
- OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
- Register buffer, Register index1, int32_t index2, \
- int32_t length, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- result_(result), \
- buffer_reg_(buffer), \
- buffer_int_(0), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
- int32_t buffer, Register index1, int32_t index2, \
- int32_t length, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- result_(result), \
- buffer_reg_(no_reg), \
- buffer_int_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- void Generate() final { \
- Label oob; \
- bool need_cache = result_ != index1_; \
- if (need_cache) __ push(index1_); \
- __ lea(index1_, Operand(index1_, index2_)); \
- __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_), \
- rmode_length_)); \
- __ j(above_equal, &oob, Label::kNear); \
- if (buffer_reg_.is_valid()) { \
- __ asm_instr(result_, Operand(buffer_reg_, index1_, times_1, 0)); \
- } else { \
- __ asm_instr(result_, \
- Operand(index1_, buffer_int_, rmode_buffer_)); \
- } \
- if (need_cache) __ pop(index1_); \
- __ jmp(exit()); \
- __ bind(&oob); \
- if (need_cache) __ pop(index1_); \
- __ xor_(result_, result_); \
- } \
- \
- private: \
- Register const result_; \
- Register const buffer_reg_; \
- int32_t const buffer_int_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- RelocInfo::Mode rmode_length_; \
- RelocInfo::Mode rmode_buffer_; \
- }; \
- if (instr->InputAt(3)->IsRegister()) { \
- auto buffer = i.InputRegister(3); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineLoadInteger(this, result, buffer, index1, index2, length, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
- __ bind(ool->exit()); \
- } else { \
- auto buffer = i.InputInt32(3); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineLoadInteger(this, result, buffer, index1, index2, length, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, Operand(index1, buffer + index2, rmode_buffer)); \
- __ bind(ool->exit()); \
- } \
- } \
- } while (false)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
- do { \
- auto value = i.InputDoubleRegister(2); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- Label done; \
- __ j(above_equal, &done, Label::kNear); \
- __ asm_instr(i.MemoryOperand(3), value); \
- __ bind(&done); \
- } else { \
- auto index2 = i.InputInt32(0); \
- auto length = i.InputInt32(1); \
- auto index1 = i.InputRegister(3); \
- RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
- RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(4)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2), \
- rmode_length)); \
- class OutOfLineStoreFloat final : public OutOfLineCode { \
- public: \
- OutOfLineStoreFloat(CodeGenerator* gen, Register buffer, \
- Register index1, int32_t index2, int32_t length, \
- XMMRegister value, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- buffer_reg_(buffer), \
- buffer_int_(0), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- value_(value), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- OutOfLineStoreFloat(CodeGenerator* gen, int32_t buffer, \
- Register index1, int32_t index2, int32_t length, \
- XMMRegister value, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- buffer_reg_(no_reg), \
- buffer_int_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- value_(value), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- void Generate() final { \
- Label oob; \
- __ push(index1_); \
- __ lea(index1_, Operand(index1_, index2_)); \
- __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_), \
- rmode_length_)); \
- __ j(above_equal, &oob, Label::kNear); \
- if (buffer_reg_.is_valid()) { \
- __ asm_instr(Operand(buffer_reg_, index1_, times_1, 0), value_); \
- } else { \
- __ asm_instr(Operand(index1_, buffer_int_, rmode_buffer_), \
- value_); \
- } \
- __ bind(&oob); \
- __ pop(index1_); \
- } \
- \
- private: \
- Register const buffer_reg_; \
- int32_t const buffer_int_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- XMMRegister const value_; \
- RelocInfo::Mode rmode_length_; \
- RelocInfo::Mode rmode_buffer_; \
- }; \
- if (instr->InputAt(4)->IsRegister()) { \
- auto buffer = i.InputRegister(4); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineStoreFloat(this, buffer, index1, index2, length, value, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
- __ bind(ool->exit()); \
- } else { \
- auto buffer = i.InputInt32(4); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineStoreFloat(this, buffer, index1, index2, length, value, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(Operand(index1, buffer + index2, rmode_buffer), value); \
- __ bind(ool->exit()); \
- } \
- } \
- } while (false)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value) \
- do { \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- Label done; \
- __ j(above_equal, &done, Label::kNear); \
- __ asm_instr(i.MemoryOperand(3), value); \
- __ bind(&done); \
- } else { \
- auto index2 = i.InputInt32(0); \
- auto length = i.InputInt32(1); \
- auto index1 = i.InputRegister(3); \
- RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
- RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(4)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2), \
- rmode_length)); \
- class OutOfLineStoreInteger final : public OutOfLineCode { \
- public: \
- OutOfLineStoreInteger(CodeGenerator* gen, Register buffer, \
- Register index1, int32_t index2, int32_t length, \
- Value value, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- buffer_reg_(buffer), \
- buffer_int_(0), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- value_(value), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- OutOfLineStoreInteger(CodeGenerator* gen, int32_t buffer, \
- Register index1, int32_t index2, int32_t length, \
- Value value, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- buffer_reg_(no_reg), \
- buffer_int_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- value_(value), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- void Generate() final { \
- Label oob; \
- __ push(index1_); \
- __ lea(index1_, Operand(index1_, index2_)); \
- __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_), \
- rmode_length_)); \
- __ j(above_equal, &oob, Label::kNear); \
- if (buffer_reg_.is_valid()) { \
- __ asm_instr(Operand(buffer_reg_, index1_, times_1, 0), value_); \
- } else { \
- __ asm_instr(Operand(index1_, buffer_int_, rmode_buffer_), \
- value_); \
- } \
- __ bind(&oob); \
- __ pop(index1_); \
- } \
- \
- private: \
- Register const buffer_reg_; \
- int32_t const buffer_int_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- Value const value_; \
- RelocInfo::Mode rmode_length_; \
- RelocInfo::Mode rmode_buffer_; \
- }; \
- if (instr->InputAt(4)->IsRegister()) { \
- auto buffer = i.InputRegister(4); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineStoreInteger(this, buffer, index1, index2, length, value, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
- __ bind(ool->exit()); \
- } else { \
- auto buffer = i.InputInt32(4); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineStoreInteger(this, buffer, index1, index2, length, value, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(Operand(index1, buffer + index2, rmode_buffer), value); \
- __ bind(ool->exit()); \
- } \
- } \
- } while (false)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- if (instr->InputAt(2)->IsRegister()) { \
- Register value = i.InputRegister(2); \
- ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register); \
- } else { \
- Immediate value = i.InputImmediate(2); \
- ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
- } \
- } while (false)
-
#define ASSEMBLE_COMPARE(asm_instr) \
do { \
if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
@@ -1025,7 +595,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
- __ Assert(equal, kWrongFunctionContext);
+ __ Assert(equal, AbortReason::kWrongFunctionContext);
}
__ mov(ecx, FieldOperand(func, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
@@ -1449,6 +1019,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32Popcnt:
__ Popcnt(i.OutputRegister(), i.InputOperand(0));
break;
+ case kLFence:
+ __ lfence();
+ break;
case kSSEFloat32Cmp:
__ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
break;
@@ -1892,6 +1465,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movss(operand, i.InputDoubleRegister(index));
}
break;
+ case kIA32Movdqu:
+ if (instr->HasOutput()) {
+ __ Movdqu(i.OutputSimd128Register(), i.MemoryOperand());
+ } else {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ Movdqu(operand, i.InputSimd128Register(index));
+ }
+ break;
case kIA32BitcastFI:
if (instr->InputAt(0)->IsFPStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0));
@@ -1978,6 +1560,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
}
break;
+ case kIA32PushSimd128:
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ sub(esp, Immediate(kSimd128Size));
+ __ movups(Operand(esp, 0), i.InputSimd128Register(0));
+ } else {
+ __ movups(kScratchDoubleReg, i.InputOperand(0));
+ __ sub(esp, Immediate(kSimd128Size));
+ __ movups(Operand(esp, 0), kScratchDoubleReg);
+ }
+ frame_access_state()->IncreaseSPDelta(kSimd128Size / kPointerSize);
+ break;
case kIA32Push:
if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
size_t index = 0;
@@ -1997,7 +1590,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kIA32Poke: {
- int const slot = MiscField::decode(instr->opcode());
+ int slot = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
__ mov(Operand(esp, slot * kPointerSize), i.InputImmediate(0));
} else {
@@ -2005,6 +1598,214 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kIA32Peek: {
+ int reverse_slot = i.InputInt32(0) + 1;
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ movsd(i.OutputDoubleRegister(), Operand(ebp, offset));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ movss(i.OutputFloatRegister(), Operand(ebp, offset));
+ }
+ } else {
+ __ mov(i.OutputRegister(), Operand(ebp, offset));
+ }
+ break;
+ }
+ case kSSEF32x4Splat: {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ XMMRegister dst = i.OutputSimd128Register();
+ __ shufps(dst, dst, 0x0);
+ break;
+ }
+ case kAVXF32x4Splat: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister src = i.InputFloatRegister(0);
+ __ vshufps(i.OutputSimd128Register(), src, src, 0x0);
+ break;
+ }
+ case kSSEF32x4ExtractLane: {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ XMMRegister dst = i.OutputFloatRegister();
+ int8_t lane = i.InputInt8(1);
+ if (lane != 0) {
+ DCHECK_LT(lane, 4);
+ __ shufps(dst, dst, lane);
+ }
+ break;
+ }
+ case kAVXF32x4ExtractLane: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputFloatRegister();
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t lane = i.InputInt8(1);
+ if (lane == 0) {
+ if (dst != src) __ vmovaps(dst, src);
+ } else {
+ DCHECK_LT(lane, 4);
+ __ vshufps(dst, src, src, lane);
+ }
+ break;
+ }
+ case kSSEF32x4ReplaceLane: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ insertps(i.OutputSimd128Register(), i.InputOperand(2),
+ i.InputInt8(1) << 4);
+ break;
+ }
+ case kAVXF32x4ReplaceLane: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vinsertps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(2), i.InputInt8(1) << 4);
+ break;
+ }
+ case kSSEF32x4Abs: {
+ XMMRegister dst = i.OutputSimd128Register();
+ Operand src = i.InputOperand(0);
+ if (src.is_reg(dst)) {
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrld(kScratchDoubleReg, 1);
+ __ andps(dst, kScratchDoubleReg);
+ } else {
+ __ pcmpeqd(dst, dst);
+ __ psrld(dst, 1);
+ __ andps(dst, src);
+ }
+ break;
+ }
+ case kAVXF32x4Abs: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpsrld(kScratchDoubleReg, kScratchDoubleReg, 1);
+ __ vandps(i.OutputSimd128Register(), kScratchDoubleReg,
+ i.InputOperand(0));
+ break;
+ }
+ case kSSEF32x4Neg: {
+ XMMRegister dst = i.OutputSimd128Register();
+ Operand src = i.InputOperand(0);
+ if (src.is_reg(dst)) {
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ pslld(kScratchDoubleReg, 31);
+ __ xorps(dst, kScratchDoubleReg);
+ } else {
+ __ pcmpeqd(dst, dst);
+ __ pslld(dst, 31);
+ __ xorps(dst, src);
+ }
+ break;
+ }
+ case kAVXF32x4Neg: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpslld(kScratchDoubleReg, kScratchDoubleReg, 31);
+ __ vxorps(i.OutputSimd128Register(), kScratchDoubleReg,
+ i.InputOperand(0));
+ break;
+ }
+ case kSSEF32x4Add: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ addps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Add: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vaddps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Sub: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ subps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Sub: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vsubps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Mul: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ mulps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Mul: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vmulps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Min: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ minps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Min: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vminps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Max: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ maxps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Max: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vmaxps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Eq: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpeqps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Eq: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vcmpeqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Ne: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpneqps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Ne: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vcmpneqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Lt: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpltps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Lt: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vcmpltps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Le: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpleps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Le: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vcmpleps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
case kIA32I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ Movd(dst, i.InputOperand(0));
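
[Editor's note on the F32x4 cases above: the kSSE* variants use the destructive two-operand SSE encodings, so the selector constrains the destination to equal the first input (hence the DCHECK_EQ checks), while the kAVX* variants use the three-operand VEX forms. Abs and Neg build their constant masks on the fly with pcmpeqd (all ones) followed by a logical shift, and the compare opcodes produce per-lane all-ones/all-zeros masks. A minimal standalone sketch of those idioms using SSE intrinsics, not the macro assembler; everything here is illustrative, not V8 code:]

#include <immintrin.h>
#include <cstdio>

int main() {
  __m128 v = _mm_setr_ps(-1.5f, 2.0f, -0.25f, 4.0f);

  // pcmpeqd reg,reg yields all ones; >>1 gives the 0x7FFFFFFF abs mask,
  // <<31 gives the 0x80000000 sign mask (same trick as kSSEF32x4Abs/Neg).
  __m128i all_ones = _mm_cmpeq_epi32(_mm_setzero_si128(), _mm_setzero_si128());
  __m128 abs_mask = _mm_castsi128_ps(_mm_srli_epi32(all_ones, 1));
  __m128 sign_mask = _mm_castsi128_ps(_mm_slli_epi32(all_ones, 31));

  __m128 abs = _mm_and_ps(v, abs_mask);   // andps: clear the sign bits
  __m128 neg = _mm_xor_ps(v, sign_mask);  // xorps: flip the sign bits

  // cmpltps produces a lane mask: 0xFFFFFFFF where v < 0, 0 elsewhere.
  __m128i lt = _mm_castps_si128(_mm_cmplt_ps(v, _mm_setzero_ps()));

  float a[4], n[4];
  unsigned int m[4];
  _mm_storeu_ps(a, abs);
  _mm_storeu_ps(n, neg);
  _mm_storeu_si128(reinterpret_cast<__m128i*>(m), lt);
  for (int i = 0; i < 4; ++i) printf("abs=%g neg=%g lt=%08x\n", a[i], n[i], m[i]);
  return 0;
}
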
@@ -2774,52 +2575,68 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpcmpeqb(i.OutputSimd128Register(), kScratchDoubleReg, src2);
break;
}
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_b);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_w);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_w);
+ case kIA32S128Zero: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Pxor(dst, dst);
break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
+ }
+ case kSSES128Not: {
+ XMMRegister dst = i.OutputSimd128Register();
+ Operand src = i.InputOperand(0);
+ if (src.is_reg(dst)) {
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(dst, kScratchDoubleReg);
+ } else {
+ __ pcmpeqd(dst, dst);
+ __ pxor(dst, src);
+ }
break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(movss, OutOfLineLoadFloat32NaN, s);
+ }
+ case kAVXS128Not: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpxor(i.OutputSimd128Register(), kScratchDoubleReg, i.InputOperand(0));
break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(movsd, OutOfLineLoadFloat64NaN, d);
+ }
+ case kSSES128And: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ pand(i.OutputSimd128Register(), i.InputOperand(1));
break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
+ }
+ case kAVXS128And: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpand(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(mov_w);
+ }
+ case kSSES128Or: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ por(i.OutputSimd128Register(), i.InputOperand(1));
break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(mov);
+ }
+ case kAVXS128Or: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpor(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(movss);
+ }
+ case kSSES128Xor: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ pxor(i.OutputSimd128Register(), i.InputOperand(1));
break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
+ }
+ case kAVXS128Xor: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpxor(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
+ }
case kIA32StackCheck: {
ExternalReference const stack_limit =
ExternalReference::address_of_stack_limit(__ isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
break;
}
- case kCheckedLoadWord64:
- case kCheckedStoreWord64:
- UNREACHABLE(); // currently unsupported checked int64 load/store.
- break;
case kAtomicExchangeInt8: {
__ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
__ movsx_b(i.InputRegister(0), i.InputRegister(0));
@@ -3038,7 +2855,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
}
@@ -3287,7 +3104,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
@@ -3331,12 +3148,13 @@ void CodeGenerator::AssembleConstructFrame() {
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
__ bind(&done);
}
- // Skip callee-saved slots, which are pushed below.
+ // Skip callee-saved and return slots, which are created below.
shrink_slots -= base::bits::CountPopulation(saves);
+ shrink_slots -= frame()->GetReturnSlotCount();
if (shrink_slots > 0) {
__ sub(esp, Immediate(shrink_slots * kPointerSize));
}
@@ -3348,6 +3166,11 @@ void CodeGenerator::AssembleConstructFrame() {
if (((1 << i) & saves)) __ push(Register::from_code(i));
}
}
+
+ // Allocate return slots (located after callee-saved).
+ if (frame()->GetReturnSlotCount() > 0) {
+ __ sub(esp, Immediate(frame()->GetReturnSlotCount() * kPointerSize));
+ }
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
@@ -3356,6 +3179,10 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
const RegList saves = descriptor->CalleeSavedRegisters();
// Restore registers.
if (saves != 0) {
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ __ add(esp, Immediate(returns * kPointerSize));
+ }
for (int i = 0; i < Register::kNumRegisters; i++) {
if (!((1 << i) & saves)) continue;
__ pop(Register::from_code(i));
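
[Editor's note on the frame changes above: the prologue now reserves explicit "return slots" for calls whose results come back on the stack. AssembleConstructFrame subtracts them from esp after the callee-saved pushes (and excludes them from shrink_slots so they are not allocated twice), and AssembleReturn releases them before popping the callee-saved registers, since they sit below that area. A rough sketch of the bookkeeping, assuming 4-byte pointers on ia32 and made-up slot counts:]

#include <cstdio>

// Hypothetical stand-in for the frame data the code generator consults.
struct FrameInfo {
  int spill_slots;         // shrink_slots before the subtractions above
  int callee_saved_slots;  // one slot per register in the save mask
  int return_slots;        // frame()->GetReturnSlotCount()
};

int main() {
  const int kPointerSize = 4;
  FrameInfo f{6, 2, 1};
  // Prologue: the spill area excludes the two areas allocated separately below it.
  int spill_bytes =
      (f.spill_slots - f.callee_saved_slots - f.return_slots) * kPointerSize;
  int saved_bytes = f.callee_saved_slots * kPointerSize;  // one push per register
  int return_bytes = f.return_slots * kPointerSize;       // final sub esp, ...
  printf("prologue grows the stack by %d bytes\n",
         spill_bytes + saved_bytes + return_bytes);
  // Epilogue: release the return area first, then pop the saved registers.
  printf("epilogue: add esp, %d; then %d pops\n", return_bytes,
         f.callee_saved_slots);
  return 0;
}
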
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index b9bf261022..a17d9f06ce 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -43,6 +43,7 @@ namespace compiler {
V(IA32Lzcnt) \
V(IA32Tzcnt) \
V(IA32Popcnt) \
+ V(LFence) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
V(SSEFloat32Sub) \
@@ -103,14 +104,45 @@ namespace compiler {
V(IA32Movl) \
V(IA32Movss) \
V(IA32Movsd) \
+ V(IA32Movdqu) \
V(IA32BitcastFI) \
V(IA32BitcastIF) \
V(IA32Lea) \
V(IA32Push) \
V(IA32PushFloat32) \
V(IA32PushFloat64) \
+ V(IA32PushSimd128) \
V(IA32Poke) \
+ V(IA32Peek) \
V(IA32StackCheck) \
+ V(SSEF32x4Splat) \
+ V(AVXF32x4Splat) \
+ V(SSEF32x4ExtractLane) \
+ V(AVXF32x4ExtractLane) \
+ V(SSEF32x4ReplaceLane) \
+ V(AVXF32x4ReplaceLane) \
+ V(SSEF32x4Abs) \
+ V(AVXF32x4Abs) \
+ V(SSEF32x4Neg) \
+ V(AVXF32x4Neg) \
+ V(SSEF32x4Add) \
+ V(AVXF32x4Add) \
+ V(SSEF32x4Sub) \
+ V(AVXF32x4Sub) \
+ V(SSEF32x4Mul) \
+ V(AVXF32x4Mul) \
+ V(SSEF32x4Min) \
+ V(AVXF32x4Min) \
+ V(SSEF32x4Max) \
+ V(AVXF32x4Max) \
+ V(SSEF32x4Eq) \
+ V(AVXF32x4Eq) \
+ V(SSEF32x4Ne) \
+ V(AVXF32x4Ne) \
+ V(SSEF32x4Lt) \
+ V(AVXF32x4Lt) \
+ V(SSEF32x4Le) \
+ V(AVXF32x4Le) \
V(IA32I32x4Splat) \
V(IA32I32x4ExtractLane) \
V(SSEI32x4ReplaceLane) \
@@ -229,7 +261,16 @@ namespace compiler {
V(SSEI8x16GtU) \
V(AVXI8x16GtU) \
V(SSEI8x16GeU) \
- V(AVXI8x16GeU)
+ V(AVXI8x16GeU) \
+ V(IA32S128Zero) \
+ V(SSES128Not) \
+ V(AVXS128Not) \
+ V(SSES128And) \
+ V(AVXS128And) \
+ V(SSES128Or) \
+ V(AVXS128Or) \
+ V(SSES128Xor) \
+ V(AVXS128Xor)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
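
[Editor's note: the opcode declarations above are an X-macro list. Each backend opcode is listed once as a V(Name) entry, and the same list is expanded into the ArchOpcode enumerators, the mnemonic table, and the switch statements in the scheduler and code generator, which is why this change only touches the list plus the two switches later in the patch. A generic, self-contained illustration of the pattern; the list and consumers below are made up, not V8's:]

#include <cstdio>

#define DEMO_OPCODE_LIST(V) \
  V(Add)                    \
  V(Sub)                    \
  V(Movdqu)

enum class DemoOpcode {
#define DECLARE(Name) k##Name,
  DEMO_OPCODE_LIST(DECLARE)
#undef DECLARE
};

const char* DemoOpcodeName(DemoOpcode op) {
  switch (op) {
#define CASE(Name)          \
  case DemoOpcode::k##Name: \
    return #Name;
    DEMO_OPCODE_LIST(CASE)
#undef CASE
  }
  return "unknown";
}

int main() { printf("%s\n", DemoOpcodeName(DemoOpcode::kMovdqu)); }
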
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
index 83c60e4455..db43c1ed1c 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -97,6 +97,34 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXFloat32Neg:
case kIA32BitcastFI:
case kIA32BitcastIF:
+ case kSSEF32x4Splat:
+ case kAVXF32x4Splat:
+ case kSSEF32x4ExtractLane:
+ case kAVXF32x4ExtractLane:
+ case kSSEF32x4ReplaceLane:
+ case kAVXF32x4ReplaceLane:
+ case kSSEF32x4Abs:
+ case kAVXF32x4Abs:
+ case kSSEF32x4Neg:
+ case kAVXF32x4Neg:
+ case kSSEF32x4Add:
+ case kAVXF32x4Add:
+ case kSSEF32x4Sub:
+ case kAVXF32x4Sub:
+ case kSSEF32x4Mul:
+ case kAVXF32x4Mul:
+ case kSSEF32x4Min:
+ case kAVXF32x4Min:
+ case kSSEF32x4Max:
+ case kAVXF32x4Max:
+ case kSSEF32x4Eq:
+ case kAVXF32x4Eq:
+ case kSSEF32x4Ne:
+ case kAVXF32x4Ne:
+ case kSSEF32x4Lt:
+ case kAVXF32x4Lt:
+ case kSSEF32x4Le:
+ case kAVXF32x4Le:
case kIA32I32x4Splat:
case kIA32I32x4ExtractLane:
case kSSEI32x4ReplaceLane:
@@ -216,6 +244,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI8x16GtU:
case kSSEI8x16GeU:
case kAVXI8x16GeU:
+ case kIA32S128Zero:
+ case kSSES128Not:
+ case kAVXS128Not:
+ case kSSES128And:
+ case kAVXS128And:
+ case kSSES128Or:
+ case kAVXS128Or:
+ case kSSES128Xor:
+ case kAVXS128Xor:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
@@ -235,16 +272,20 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Movl:
case kIA32Movss:
case kIA32Movsd:
+ case kIA32Movdqu:
// Moves are used for memory load/store operations.
return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
case kIA32StackCheck:
+ case kIA32Peek:
return kIsLoadOperation;
case kIA32Push:
case kIA32PushFloat32:
case kIA32PushFloat64:
+ case kIA32PushSimd128:
case kIA32Poke:
+ case kLFence:
return kHasSideEffect;
#define CASE(Name) case k##Name:
@@ -262,18 +303,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// Basic latency modeling for ia32 instructions. They have been determined
// in an empirical way.
switch (instr->arch_opcode()) {
- case kCheckedLoadInt8:
- case kCheckedLoadUint8:
- case kCheckedLoadInt16:
- case kCheckedLoadUint16:
- case kCheckedLoadWord32:
- case kCheckedLoadFloat32:
- case kCheckedLoadFloat64:
- case kCheckedStoreWord8:
- case kCheckedStoreWord16:
- case kCheckedStoreWord32:
- case kCheckedStoreFloat32:
- case kCheckedStoreFloat64:
case kSSEFloat64Mul:
return 5;
case kIA32Imul:
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index bae563d7b6..d8bf250ec6 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -225,6 +225,11 @@ void InstructionSelector::VisitDebugAbort(Node* node) {
Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), edx));
}
+void InstructionSelector::VisitSpeculationFence(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kLFence, g.NoOutput());
+}
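
[Editor's note: VisitSpeculationFence lowers the machine-level SpeculationFence operator to the new kLFence opcode, which the code generator (earlier in this patch) emits as a single lfence. lfence does not let later instructions begin executing until earlier ones have completed locally, so it serves as a barrier against speculative loads past a bounds check (Spectre-style mitigations). A standalone sketch of the same barrier via the compiler intrinsic; the table and bounds check are made up, only _mm_lfence() is the point:]

#include <emmintrin.h>

int ReadWithFence(const int* table, int size, int index) {
  if (index < 0 || index >= size) return 0;
  // Keep the load below from executing speculatively before the bounds
  // check above has resolved -- the effect of the lfence instruction.
  _mm_lfence();
  return table[index];
}
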
+
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
@@ -249,8 +254,10 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord32:
opcode = kIA32Movl;
break;
+ case MachineRepresentation::kSimd128:
+ opcode = kIA32Movdqu;
+ break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -339,8 +346,10 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord32:
opcode = kIA32Movl;
break;
+ case MachineRepresentation::kSimd128:
+ opcode = kIA32Movdqu;
+ break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
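
[Editor's note: kSimd128 loads and stores now select the new kIA32Movdqu opcode instead of hitting UNREACHABLE. movdqu is the unaligned 128-bit move, which matters because nothing guarantees 16-byte alignment for these accesses; the aligned form (movdqa) would fault. A small standalone illustration of that distinction using intrinsics; the buffer and offsets are made up:]

#include <emmintrin.h>
#include <cstdint>
#include <cstdio>

int main() {
  alignas(16) uint8_t buf[32] = {1, 2, 3, 4, 5};
  // buf + 4 is not 16-byte aligned: movdqa (_mm_load_si128) could fault here,
  // movdqu (_mm_loadu_si128) handles it, which is why the opcode uses Movdqu.
  __m128i v = _mm_loadu_si128(reinterpret_cast<const __m128i*>(buf + 4));
  _mm_storeu_si128(reinterpret_cast<__m128i*>(buf + 16), v);
  printf("%d\n", buf[16]);  // prints 5, the byte loaded from offset 4
  return 0;
}
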
@@ -379,156 +388,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- IA32OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
- Int32BinopMatcher moffset(offset);
- InstructionOperand buffer_operand = g.CanBeImmediate(buffer)
- ? g.UseImmediate(buffer)
- : g.UseRegister(buffer);
- Int32Matcher mlength(length);
- if (mlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.DefineAsRegister(node),
- g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
- g.UseRegister(moffset.left().node()), buffer_operand);
- return;
- }
- IntMatcher<int32_t, IrOpcode::kRelocatableInt32Constant> mmlength(length);
- if (mmlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mmlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.DefineAsRegister(node),
- g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
- g.UseRegister(moffset.left().node()), buffer_operand);
- return;
- }
- }
- InstructionOperand offset_operand = g.UseRegister(offset);
- InstructionOperand length_operand =
- g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- if (g.CanBeImmediate(buffer)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), offset_operand, length_operand,
- offset_operand, g.UseImmediate(buffer));
- } else {
- Emit(opcode | AddressingModeField::encode(kMode_MR1),
- g.DefineAsRegister(node), offset_operand, length_operand,
- g.UseRegister(buffer), offset_operand);
- }
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- IA32OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand value_operand =
- g.CanBeImmediate(value) ? g.UseImmediate(value)
- : ((rep == MachineRepresentation::kWord8 ||
- rep == MachineRepresentation::kBit)
- ? g.UseByteRegister(value)
- : g.UseRegister(value));
- if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
- Int32BinopMatcher moffset(offset);
- InstructionOperand buffer_operand = g.CanBeImmediate(buffer)
- ? g.UseImmediate(buffer)
- : g.UseRegister(buffer);
- Int32Matcher mlength(length);
- if (mlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.NoOutput(), g.UseImmediate(moffset.right().node()),
- g.UseImmediate(length), value_operand,
- g.UseRegister(moffset.left().node()), buffer_operand);
- return;
- }
- IntMatcher<int32_t, IrOpcode::kRelocatableInt32Constant> mmlength(length);
- if (mmlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mmlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.NoOutput(), g.UseImmediate(moffset.right().node()),
- g.UseImmediate(length), value_operand,
- g.UseRegister(moffset.left().node()), buffer_operand);
- return;
- }
- }
- InstructionOperand offset_operand = g.UseRegister(offset);
- InstructionOperand length_operand =
- g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- if (g.CanBeImmediate(buffer)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- offset_operand, length_operand, value_operand, offset_operand,
- g.UseImmediate(buffer));
- } else {
- Emit(opcode | AddressingModeField::encode(kMode_MR1), g.NoOutput(),
- offset_operand, length_operand, value_operand, g.UseRegister(buffer),
- offset_operand);
- }
-}
-
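
[Editor's note: the deleted VisitCheckedLoad/VisitCheckedStore, together with the kChecked* opcodes and the ASSEMBLE_CHECKED_* macros removed earlier in this patch, implemented asm.js-style bounds-checked memory access at instruction-selection time: compare the effective index against the buffer length and silently skip the access when it is out of bounds (loads yield a default value instead of trapping). Written out by hand, the removed store path amounted to roughly the following; this is a sketch of the emitted guard, not the original code:]

#include <cstdint>
#include <cstddef>

// Mirrors the guard the checked-store macro emitted: cmp index, length;
// jump past the store when index >= length (no trap, the store is dropped).
void CheckedStoreWord8(uint8_t* buffer, size_t length, size_t index,
                       uint8_t value) {
  if (index >= length) return;  // out of bounds: skip the access
  buffer[index] = value;
}
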
namespace {
// Shared routine for multiple binary operations.
@@ -599,7 +458,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -1110,11 +970,11 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
- if (input.node()) {
+ if (input.node) {
int const slot = static_cast<int>(n);
InstructionOperand value = g.CanBeImmediate(node)
- ? g.UseImmediate(input.node())
- : g.UseRegister(input.node());
+ ? g.UseImmediate(input.node)
+ : g.UseRegister(input.node);
Emit(kIA32Poke | MiscField::encode(slot), g.NoOutput(), value);
}
}
@@ -1123,29 +983,30 @@ void InstructionSelector::EmitPrepareArguments(
int effect_level = GetEffectLevel(node);
for (PushParameter input : base::Reversed(*arguments)) {
// Skip any alignment holes in pushed nodes.
- Node* input_node = input.node();
- if (input.node() == nullptr) continue;
- if (g.CanBeMemoryOperand(kIA32Push, node, input_node, effect_level)) {
+ if (input.node == nullptr) continue;
+ if (g.CanBeMemoryOperand(kIA32Push, node, input.node, effect_level)) {
InstructionOperand outputs[1];
InstructionOperand inputs[4];
size_t input_count = 0;
InstructionCode opcode = kIA32Push;
AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
- input_node, inputs, &input_count);
+ input.node, inputs, &input_count);
opcode |= AddressingModeField::encode(mode);
Emit(opcode, 0, outputs, input_count, inputs);
} else {
InstructionOperand value =
- g.CanBeImmediate(input.node())
- ? g.UseImmediate(input.node())
+ g.CanBeImmediate(input.node)
+ ? g.UseImmediate(input.node)
: IsSupported(ATOM) ||
- sequence()->IsFP(GetVirtualRegister(input.node()))
- ? g.UseRegister(input.node())
- : g.Use(input.node());
- if (input.type() == MachineType::Float32()) {
+ sequence()->IsFP(GetVirtualRegister(input.node))
+ ? g.UseRegister(input.node)
+ : g.Use(input.node);
+ if (input.location.GetType() == MachineType::Float32()) {
Emit(kIA32PushFloat32, g.NoOutput(), value);
- } else if (input.type() == MachineType::Float64()) {
+ } else if (input.location.GetType() == MachineType::Float64()) {
Emit(kIA32PushFloat64, g.NoOutput(), value);
+ } else if (input.location.GetType() == MachineType::Simd128()) {
+ Emit(kIA32PushSimd128, g.NoOutput(), value);
} else {
Emit(kIA32Push, g.NoOutput(), value);
}
@@ -1154,6 +1015,29 @@ void InstructionSelector::EmitPrepareArguments(
}
}
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ IA32OperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ // Skip any alignment holes in nodes.
+ if (output.node != nullptr) {
+ DCHECK(!descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ Emit(kIA32Peek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
+ }
+ reverse_slot += output.location.GetSizeInPointers();
+ }
+}
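
[Editor's note: EmitPrepareResults is the selector-side half of the new multi-value return support. Results that the call descriptor places in caller frame slots rather than registers are read back after the call with kIA32Peek, whose immediate is a "reverse slot" index counted from the top of the return area; the kIA32Peek case added to the code generator earlier in this patch turns that index into an ebp-relative load. A small sketch of the reverse-slot numbering, using a made-up stand-in for LinkageLocation:]

#include <cstdio>
#include <vector>

struct ReturnLoc {
  bool caller_frame_slot;  // stands in for location.IsCallerFrameSlot()
  int size_in_pointers;    // stands in for location.GetSizeInPointers()
};

int main() {
  // e.g. a call returning (i32 in a register, f64 on the stack, i32 on the stack)
  std::vector<ReturnLoc> results = {{false, 1}, {true, 2}, {true, 1}};
  int reverse_slot = 0;
  for (const ReturnLoc& r : results) {
    if (!r.caller_frame_slot) continue;  // register results occupy no slot
    printf("emit kIA32Peek with reverse slot %d\n", reverse_slot);
    reverse_slot += r.size_in_pointers;
  }
  return 0;
}
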
+
bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
@@ -1181,7 +1065,8 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
InstructionOperand output = g.DefineAsRegister(cont->result());
selector->Emit(opcode, 1, &output, input_count, inputs);
@@ -1203,7 +1088,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
} else {
@@ -1389,7 +1275,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()));
@@ -1503,14 +1390,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -1897,12 +1784,21 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
-#define SIMD_TYPES(V) \
- V(I32x4) \
- V(I16x8) \
+#define SIMD_INT_TYPES(V) \
+ V(I32x4) \
+ V(I16x8) \
V(I8x16)
#define SIMD_BINOP_LIST(V) \
+ V(F32x4Add) \
+ V(F32x4Sub) \
+ V(F32x4Mul) \
+ V(F32x4Min) \
+ V(F32x4Max) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
V(I32x4Add) \
V(I32x4Sub) \
V(I32x4Mul) \
@@ -1948,13 +1844,21 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16MinU) \
V(I8x16MaxU) \
V(I8x16GtU) \
- V(I8x16GeU)
-
-#define SIMD_UNOP_LIST(V) \
- V(I32x4Neg) \
- V(I16x8Neg) \
+ V(I8x16GeU) \
+ V(S128And) \
+ V(S128Or) \
+ V(S128Xor)
+
+#define SIMD_INT_UNOP_LIST(V) \
+ V(I32x4Neg) \
+ V(I16x8Neg) \
V(I8x16Neg)
+#define SIMD_OTHER_UNOP_LIST(V) \
+ V(F32x4Abs) \
+ V(F32x4Neg) \
+ V(S128Not)
+
#define SIMD_SHIFT_OPCODES(V) \
V(I32x4Shl) \
V(I32x4ShrS) \
@@ -1963,11 +1867,38 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8ShrS) \
V(I16x8ShrU)
+void InstructionSelector::VisitF32x4Splat(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ if (IsSupported(AVX)) {
+ Emit(kAVXF32x4Splat, g.DefineAsRegister(node), operand0);
+ } else {
+ Emit(kSSEF32x4Splat, g.DefineSameAsFirst(node), operand0);
+ }
+}
+
+void InstructionSelector::VisitF32x4ExtractLane(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.UseImmediate(OpParameter<int32_t>(node));
+ if (IsSupported(AVX)) {
+ Emit(kAVXF32x4ExtractLane, g.DefineAsRegister(node), operand0, operand1);
+ } else {
+ Emit(kSSEF32x4ExtractLane, g.DefineSameAsFirst(node), operand0, operand1);
+ }
+}
+
+void InstructionSelector::VisitS128Zero(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32S128Zero, g.DefineAsRegister(node));
+}
+
+
#define VISIT_SIMD_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
VisitRO(this, node, kIA32##Type##Splat); \
}
-SIMD_TYPES(VISIT_SIMD_SPLAT)
+SIMD_INT_TYPES(VISIT_SIMD_SPLAT)
#undef VISIT_SIMD_SPLAT
#define VISIT_SIMD_EXTRACT_LANE(Type) \
@@ -1977,7 +1908,7 @@ SIMD_TYPES(VISIT_SIMD_SPLAT)
Emit(kIA32##Type##ExtractLane, g.DefineAsRegister(node), \
g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); \
}
-SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
+SIMD_INT_TYPES(VISIT_SIMD_EXTRACT_LANE)
#undef VISIT_SIMD_EXTRACT_LANE
#define VISIT_SIMD_REPLACE_LANE(Type) \
@@ -1994,7 +1925,8 @@ SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
operand1, operand2); \
} \
}
-SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
+SIMD_INT_TYPES(VISIT_SIMD_REPLACE_LANE)
+VISIT_SIMD_REPLACE_LANE(F32x4)
#undef VISIT_SIMD_REPLACE_LANE
#define VISIT_SIMD_SHIFT(Opcode) \
@@ -2011,13 +1943,22 @@ SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
-#define VISIT_SIMD_UNOP(Opcode) \
+#define VISIT_SIMD_INT_UNOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
IA32OperandGenerator g(this); \
Emit(kIA32##Opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \
}
-SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
-#undef VISIT_SIMD_UNOP
+SIMD_INT_UNOP_LIST(VISIT_SIMD_INT_UNOP)
+#undef VISIT_SIMD_INT_UNOP
+
+#define VISIT_SIMD_OTHER_UNOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ IA32OperandGenerator g(this); \
+ InstructionCode opcode = IsSupported(AVX) ? kAVX##Opcode : kSSE##Opcode; \
+ Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \
+ }
+SIMD_OTHER_UNOP_LIST(VISIT_SIMD_OTHER_UNOP)
+#undef VISIT_SIMD_OTHER_UNOP
#define VISIT_SIMD_BINOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -2039,7 +1980,8 @@ MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::kWord32ShiftIsSafe |
- MachineOperatorBuilder::kWord32Ctz;
+ MachineOperatorBuilder::kWord32Ctz |
+ MachineOperatorBuilder::kSpeculationFence;
if (CpuFeatures::IsSupported(POPCNT)) {
flags |= MachineOperatorBuilder::kWord32Popcnt;
}
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index f5457ee562..df3078d739 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -68,20 +68,6 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(ArchParentFramePointer) \
V(ArchTruncateDoubleToI) \
V(ArchStoreWithWriteBarrier) \
- V(CheckedLoadInt8) \
- V(CheckedLoadUint8) \
- V(CheckedLoadInt16) \
- V(CheckedLoadUint16) \
- V(CheckedLoadWord32) \
- V(CheckedLoadWord64) \
- V(CheckedLoadFloat32) \
- V(CheckedLoadFloat64) \
- V(CheckedStoreWord8) \
- V(CheckedStoreWord16) \
- V(CheckedStoreWord32) \
- V(CheckedStoreWord64) \
- V(CheckedStoreFloat32) \
- V(CheckedStoreFloat64) \
V(ArchStackSlot) \
V(AtomicLoadInt8) \
V(AtomicLoadUint8) \
diff --git a/deps/v8/src/compiler/instruction-scheduler.cc b/deps/v8/src/compiler/instruction-scheduler.cc
index b1164767f2..f7afaab697 100644
--- a/deps/v8/src/compiler/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/instruction-scheduler.cc
@@ -268,21 +268,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kIeee754Float64Sinh:
case kIeee754Float64Tan:
case kIeee754Float64Tanh:
-#ifdef V8_TARGET_ARCH_ARM64
- // This is an unfortunate effect of arm64 dual stack pointers:
- // * TruncateDoubleToI may call a stub, and the stub will push and pop
- // values onto the stack. Push updates both CSP and JSSP but pop only
- // restores JSSP.
- // * kIeee754XXX opcodes call a C Function and the call macro may update
- // CSP to meet alignment requirements but it will not bring back CSP to
- // its original value.
- // Those opcode cannot be reordered with instructions with side effects
- // such as Arm64ClaimCSP.
- // TODO(arm64): remove when JSSP is gone.
- return kHasSideEffect;
-#else
return kNoOpcodeFlags;
-#endif
case kArchStackPointer:
// ArchStackPointer instruction loads the current stack pointer value and
@@ -315,22 +301,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchThrowTerminator:
return kIsBlockTerminator;
- case kCheckedLoadInt8:
- case kCheckedLoadUint8:
- case kCheckedLoadInt16:
- case kCheckedLoadUint16:
- case kCheckedLoadWord32:
- case kCheckedLoadWord64:
- case kCheckedLoadFloat32:
- case kCheckedLoadFloat64:
- return kIsLoadOperation;
-
- case kCheckedStoreWord8:
- case kCheckedStoreWord16:
- case kCheckedStoreWord32:
- case kCheckedStoreWord64:
- case kCheckedStoreFloat32:
- case kCheckedStoreFloat64:
case kArchStoreWithWriteBarrier:
return kHasSideEffect;
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index 8334d1751a..7c7a2708c5 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -251,6 +251,23 @@ class OperandGenerator {
return Constant(OpParameter<ExternalReference>(node));
case IrOpcode::kHeapConstant:
return Constant(OpParameter<Handle<HeapObject>>(node));
+ case IrOpcode::kDeadValue: {
+ switch (DeadValueRepresentationOf(node->op())) {
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTaggedPointer:
+ return Constant(static_cast<int32_t>(0));
+ case MachineRepresentation::kFloat64:
+ return Constant(static_cast<double>(0));
+ case MachineRepresentation::kFloat32:
+ return Constant(static_cast<float>(0));
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
default:
break;
}
@@ -350,8 +367,9 @@ class FlagsContinuation final {
static FlagsContinuation ForDeoptimize(FlagsCondition condition,
DeoptimizeKind kind,
DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
Node* frame_state) {
- return FlagsContinuation(condition, kind, reason, frame_state);
+ return FlagsContinuation(condition, kind, reason, feedback, frame_state);
}
// Creates a new flags continuation for a boolean value.
@@ -382,6 +400,10 @@ class FlagsContinuation final {
DCHECK(IsDeoptimize());
return reason_;
}
+ VectorSlotPair const& feedback() const {
+ DCHECK(IsDeoptimize());
+ return feedback_;
+ }
Node* frame_state() const {
DCHECK(IsDeoptimize());
return frame_state_or_result_;
@@ -452,11 +474,13 @@ class FlagsContinuation final {
private:
FlagsContinuation(FlagsCondition condition, DeoptimizeKind kind,
- DeoptimizeReason reason, Node* frame_state)
+ DeoptimizeReason reason, VectorSlotPair const& feedback,
+ Node* frame_state)
: mode_(kFlags_deoptimize),
condition_(condition),
kind_(kind),
reason_(reason),
+ feedback_(feedback),
frame_state_or_result_(frame_state) {
DCHECK_NOT_NULL(frame_state);
}
@@ -480,6 +504,7 @@ class FlagsContinuation final {
FlagsCondition condition_;
DeoptimizeKind kind_; // Only valid if mode_ == kFlags_deoptimize
DeoptimizeReason reason_; // Only valid if mode_ == kFlags_deoptimize
+ VectorSlotPair feedback_; // Only valid if mode_ == kFlags_deoptimize
Node* frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize
// or mode_ == kFlags_set.
BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch.
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index d19692e3dd..c94b42b458 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -668,7 +668,7 @@ struct CallBuffer {
const CallDescriptor* descriptor;
FrameStateDescriptor* frame_state_descriptor;
- NodeVector output_nodes;
+ ZoneVector<PushParameter> output_nodes;
InstructionOperandVector outputs;
InstructionOperandVector instruction_args;
ZoneVector<PushParameter> pushed_nodes;
@@ -693,26 +693,38 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
bool is_tail_call,
int stack_param_delta) {
OperandGenerator g(this);
- DCHECK_LE(call->op()->ValueOutputCount(),
- static_cast<int>(buffer->descriptor->ReturnCount()));
+ size_t ret_count = buffer->descriptor->ReturnCount();
+ DCHECK_LE(call->op()->ValueOutputCount(), ret_count);
DCHECK_EQ(
call->op()->ValueInputCount(),
static_cast<int>(buffer->input_count() + buffer->frame_state_count()));
- if (buffer->descriptor->ReturnCount() > 0) {
+ if (ret_count > 0) {
// Collect the projections that represent multiple outputs from this call.
- if (buffer->descriptor->ReturnCount() == 1) {
- buffer->output_nodes.push_back(call);
+ if (ret_count == 1) {
+ PushParameter result = {call, buffer->descriptor->GetReturnLocation(0)};
+ buffer->output_nodes.push_back(result);
} else {
- buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), nullptr);
+ buffer->output_nodes.resize(ret_count);
+ int stack_count = 0;
+ for (size_t i = 0; i < ret_count; ++i) {
+ LinkageLocation location = buffer->descriptor->GetReturnLocation(i);
+ buffer->output_nodes[i] = PushParameter(nullptr, location);
+ if (location.IsCallerFrameSlot()) {
+ stack_count += location.GetSizeInPointers();
+ }
+ }
for (Edge const edge : call->use_edges()) {
if (!NodeProperties::IsValueEdge(edge)) continue;
- DCHECK_EQ(IrOpcode::kProjection, edge.from()->opcode());
- size_t const index = ProjectionIndexOf(edge.from()->op());
+ Node* node = edge.from();
+ DCHECK_EQ(IrOpcode::kProjection, node->opcode());
+ size_t const index = ProjectionIndexOf(node->op());
+
DCHECK_LT(index, buffer->output_nodes.size());
- DCHECK(!buffer->output_nodes[index]);
- buffer->output_nodes[index] = edge.from();
+ DCHECK(!buffer->output_nodes[index].node);
+ buffer->output_nodes[index].node = node;
}
+ frame_->EnsureReturnSlots(stack_count);
}
// Filter out the outputs that aren't live because no projection uses them.
@@ -722,22 +734,22 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
: buffer->frame_state_descriptor->state_combine()
.ConsumedOutputCount();
for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
- bool output_is_live = buffer->output_nodes[i] != nullptr ||
+ bool output_is_live = buffer->output_nodes[i].node != nullptr ||
i < outputs_needed_by_framestate;
if (output_is_live) {
- MachineRepresentation rep =
- buffer->descriptor->GetReturnType(static_cast<int>(i))
- .representation();
- LinkageLocation location =
- buffer->descriptor->GetReturnLocation(static_cast<int>(i));
+ LinkageLocation location = buffer->output_nodes[i].location;
+ MachineRepresentation rep = location.GetType().representation();
- Node* output = buffer->output_nodes[i];
+ Node* output = buffer->output_nodes[i].node;
InstructionOperand op = output == nullptr
? g.TempLocation(location)
: g.DefineAsLocation(output, location);
MarkAsRepresentation(rep, op);
- buffer->outputs.push_back(op);
+ if (!UnallocatedOperand::cast(op).HasFixedSlotPolicy()) {
+ buffer->outputs.push_back(op);
+ buffer->output_nodes[i].node = nullptr;
+ }
}
}
}
@@ -803,7 +815,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
int const state_id = sequence()->AddDeoptimizationEntry(
buffer->frame_state_descriptor, DeoptimizeKind::kLazy,
- DeoptimizeReason::kNoReason);
+ DeoptimizeReason::kUnknown, VectorSlotPair());
buffer->instruction_args.push_back(g.TempImmediate(state_id));
StateObjectDeduplicator deduplicator(instruction_zone());
@@ -842,8 +854,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
buffer->pushed_nodes.resize(stack_index + 1);
}
- PushParameter parameter(*iter, buffer->descriptor->GetInputType(index));
- buffer->pushed_nodes[stack_index] = parameter;
+ PushParameter param = {*iter, location};
+ buffer->pushed_nodes[stack_index] = param;
pushed_count++;
} else {
buffer->instruction_args.push_back(op);
@@ -890,7 +902,6 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
SetEffectLevel(node, effect_level);
if (node->opcode() == IrOpcode::kStore ||
node->opcode() == IrOpcode::kUnalignedStore ||
- node->opcode() == IrOpcode::kCheckedStore ||
node->opcode() == IrOpcode::kCall ||
node->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
node->opcode() == IrOpcode::kProtectedLoad ||
@@ -960,7 +971,7 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
<< "only one predecessor." << std::endl
<< "# Current Block: " << *successor << std::endl
<< "# Node: " << *node;
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
}
@@ -1026,7 +1037,7 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
case BasicBlock::kDeoptimize: {
DeoptimizeParameters p = DeoptimizeParametersOf(input->op());
Node* value = input->InputAt(0);
- return VisitDeoptimize(p.kind(), p.reason(), value);
+ return VisitDeoptimize(p.kind(), p.reason(), p.feedback(), value);
}
case BasicBlock::kThrow:
DCHECK_EQ(IrOpcode::kThrow, input->opcode());
@@ -1136,6 +1147,9 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kUnreachable:
VisitUnreachable(node);
return;
+ case IrOpcode::kDeadValue:
+ VisitDeadValue(node);
+ return;
case IrOpcode::kComment:
VisitComment(node);
return;
@@ -1472,14 +1486,6 @@ void InstructionSelector::VisitNode(Node* node) {
}
case IrOpcode::kUnalignedStore:
return VisitUnalignedStore(node);
- case IrOpcode::kCheckedLoad: {
- MachineRepresentation rep =
- CheckedLoadRepresentationOf(node->op()).representation();
- MarkAsRepresentation(rep, node);
- return VisitCheckedLoad(node);
- }
- case IrOpcode::kCheckedStore:
- return VisitCheckedStore(node);
case IrOpcode::kInt32PairAdd:
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
@@ -1525,6 +1531,8 @@ void InstructionSelector::VisitNode(Node* node) {
ATOMIC_CASE(Or)
ATOMIC_CASE(Xor)
#undef ATOMIC_CASE
+ case IrOpcode::kSpeculationFence:
+ return VisitSpeculationFence(node);
case IrOpcode::kProtectedLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
@@ -2089,12 +2097,6 @@ void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitF32x4Splat(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
@@ -2102,73 +2104,36 @@ void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
-void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
-
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Shl(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Mul(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MaxS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MinS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MinU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MaxU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
+void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
!V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
@@ -2221,79 +2186,11 @@ void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GeS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GtU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GeU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ExtractLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
- UNIMPLEMENTED();
-}
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
!V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8AddHoriz(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
// && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8MaxS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
@@ -2310,21 +2207,6 @@ void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI16x8GtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8GeS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8GtU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8GeU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
@@ -2333,17 +2215,6 @@ void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16ExtractLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
@@ -2352,35 +2223,6 @@ void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16MinS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GeS(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
@@ -2398,38 +2240,7 @@ void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GtU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GeU(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
!V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Zero(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitS128Select(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
// && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
@@ -2582,15 +2393,6 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
buffer.instruction_args.push_back(g.Label(handler));
}
- bool from_native_stack = linkage()->GetIncomingDescriptor()->UseNativeStack();
- bool to_native_stack = descriptor->UseNativeStack();
- if (from_native_stack != to_native_stack) {
- // (arm64 only) Mismatch in the use of stack pointers. One or the other
- // has to be restored manually by the code generator.
- flags |= to_native_stack ? CallDescriptor::kRestoreJSSP
- : CallDescriptor::kRestoreCSP;
- }
-
// Select the appropriate opcode based on the call type.
InstructionCode opcode = kArchNop;
switch (descriptor->kind()) {
@@ -2618,6 +2420,8 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
&buffer.instruction_args.front());
if (instruction_selection_failed()) return;
call_instr->MarkAsCall();
+
+ EmitPrepareResults(&(buffer.output_nodes), descriptor, node);
}
void InstructionSelector::VisitCallWithCallerSavedRegisters(
@@ -2685,6 +2489,14 @@ void InstructionSelector::VisitTailCall(Node* node) {
Emit(kArchPrepareTailCall, g.NoOutput());
+ // Add an immediate operand that represents the first slot that is unused
+ // with respect to the stack pointer that has been updated for the tail call
+ // instruction. This is used by backends that need to pad arguments for stack
+ // alignment, in order to store an optional slot of padding above the
+ // arguments.
+ int optional_padding_slot = callee->GetFirstUnusedStackSlot();
+ buffer.instruction_args.push_back(g.TempImmediate(optional_padding_slot));
+
int first_unused_stack_slot =
(V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0) +
stack_param_delta;
@@ -2724,29 +2536,31 @@ void InstructionSelector::VisitReturn(Node* ret) {
Instruction* InstructionSelector::EmitDeoptimize(
InstructionCode opcode, InstructionOperand output, InstructionOperand a,
- DeoptimizeKind kind, DeoptimizeReason reason, Node* frame_state) {
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback, Node* frame_state) {
size_t output_count = output.IsInvalid() ? 0 : 1;
InstructionOperand inputs[] = {a};
size_t input_count = arraysize(inputs);
return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
- kind, reason, frame_state);
+ kind, reason, feedback, frame_state);
}
Instruction* InstructionSelector::EmitDeoptimize(
InstructionCode opcode, InstructionOperand output, InstructionOperand a,
InstructionOperand b, DeoptimizeKind kind, DeoptimizeReason reason,
- Node* frame_state) {
+ VectorSlotPair const& feedback, Node* frame_state) {
size_t output_count = output.IsInvalid() ? 0 : 1;
InstructionOperand inputs[] = {a, b};
size_t input_count = arraysize(inputs);
return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
- kind, reason, frame_state);
+ kind, reason, feedback, frame_state);
}
Instruction* InstructionSelector::EmitDeoptimize(
InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
size_t input_count, InstructionOperand* inputs, DeoptimizeKind kind,
- DeoptimizeReason reason, Node* frame_state) {
+ DeoptimizeReason reason, VectorSlotPair const& feedback,
+ Node* frame_state) {
OperandGenerator g(this);
FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
InstructionOperandVector args(instruction_zone());
@@ -2757,7 +2571,7 @@ Instruction* InstructionSelector::EmitDeoptimize(
opcode |= MiscField::encode(static_cast<int>(input_count));
DCHECK_NE(DeoptimizeKind::kLazy, kind);
int const state_id =
- sequence()->AddDeoptimizationEntry(descriptor, kind, reason);
+ sequence()->AddDeoptimizationEntry(descriptor, kind, reason, feedback);
args.push_back(g.TempImmediate(state_id));
StateObjectDeduplicator deduplicator(instruction_zone());
AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
@@ -2775,8 +2589,10 @@ void InstructionSelector::EmitIdentity(Node* node) {
void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
Node* value) {
- EmitDeoptimize(kArchDeoptimize, 0, nullptr, 0, nullptr, kind, reason, value);
+ EmitDeoptimize(kArchDeoptimize, 0, nullptr, 0, nullptr, kind, reason,
+ feedback, value);
}
void InstructionSelector::VisitThrow(Node* node) {
@@ -2794,6 +2610,12 @@ void InstructionSelector::VisitUnreachable(Node* node) {
Emit(kArchDebugBreak, g.NoOutput());
}
+void InstructionSelector::VisitDeadValue(Node* node) {
+ OperandGenerator g(this);
+ MarkAsRepresentation(DeadValueRepresentationOf(node->op()), node);
+ Emit(kArchDebugBreak, g.DefineAsConstant(node));
+}
+
void InstructionSelector::VisitComment(Node* node) {
OperandGenerator g(this);
InstructionOperand operand(g.UseImmediate(node));
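
For context on the tail-call hunk above: the selector now passes the callee's first unused stack slot as an extra immediate so backends that must keep the stack pointer aligned know where an optional padding slot would sit. A minimal standalone sketch of that arithmetic follows; the 8-byte slot size and 16-byte alignment are assumptions chosen for illustration, not values taken from this diff.

    #include <cstdio>

    constexpr int kSlotSize = 8;        // bytes per stack slot (assumed)
    constexpr int kStackAlignment = 16; // required SP alignment in bytes (assumed)

    // Number of slots the arguments occupy once padded to the alignment.
    int PaddedSlotCount(int argument_slots) {
      int slots_per_alignment = kStackAlignment / kSlotSize;
      int remainder = argument_slots % slots_per_alignment;
      return remainder == 0 ? argument_slots
                            : argument_slots + (slots_per_alignment - remainder);
    }

    int main() {
      for (int args = 0; args <= 4; ++args) {
        int padded = PaddedSlotCount(args);
        if (padded == args) {
          std::printf("%d argument slots: already aligned, no padding slot\n", args);
        } else {
          // The padding slot sits just above the arguments, at the first unused
          // slot index, which is what the selector passes as an immediate.
          std::printf("%d argument slots: pad to %d, padding slot at index %d\n",
                      args, padded, args);
        }
      }
      return 0;
    }
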
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index 2bd85d7dab..75c41c165f 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -10,6 +10,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/instruction-scheduler.h"
#include "src/compiler/instruction.h"
+#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/globals.h"
@@ -30,17 +31,13 @@ class StateObjectDeduplicator;
// This struct connects nodes of parameters which are going to be pushed on the
// call stack with their parameter index in the call descriptor of the callee.
-class PushParameter {
- public:
- PushParameter() : node_(nullptr), type_(MachineType::None()) {}
- PushParameter(Node* node, MachineType type) : node_(node), type_(type) {}
-
- Node* node() const { return node_; }
- MachineType type() const { return type_; }
+struct PushParameter {
+ PushParameter(Node* n = nullptr,
+ LinkageLocation l = LinkageLocation::ForAnyRegister())
+ : node(n), location(l) {}
- private:
- Node* node_;
- MachineType type_;
+ Node* node;
+ LinkageLocation location;
};
enum class FrameStateInputKind { kAny, kStackSlot };
@@ -115,15 +112,20 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, DeoptimizeKind kind,
- DeoptimizeReason reason, Node* frame_state);
+ DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ Node* frame_state);
Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
Node* frame_state);
Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
InstructionOperand* outputs, size_t input_count,
InstructionOperand* inputs, DeoptimizeKind kind,
- DeoptimizeReason reason, Node* frame_state);
+ DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ Node* frame_state);
// ===========================================================================
// ============== Architecture-independent CPU feature methods. ==============
@@ -345,14 +347,17 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
void VisitSwitch(Node* node, const SwitchInfo& sw);
void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
- Node* value);
+ VectorSlotPair const& feedback, Node* value);
void VisitReturn(Node* ret);
void VisitThrow(Node* node);
void VisitRetain(Node* node);
void VisitUnreachable(Node* node);
+ void VisitDeadValue(Node* node);
void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
const CallDescriptor* descriptor, Node* node);
+ void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
+ const CallDescriptor* descriptor, Node* node);
void EmitIdentity(Node* node);
bool CanProduceSignalingNaN(Node* node);
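
The PushParameter change above replaces the MachineType member with a LinkageLocation, so the same struct can describe values pushed onto the stack and values returned there, which is what the new EmitPrepareResults hook consumes. A rough sketch of the idea with invented stand-in types (these are not the real V8 classes):

    #include <cstdio>
    #include <vector>

    struct Location {
      enum Kind { kRegister, kStackSlot } kind;
      int index;  // register code or stack slot offset
    };

    struct PushParam {
      const char* node;   // stands in for the IR node being pushed or returned
      Location location;  // where the calling convention places it
    };

    int main() {
      std::vector<PushParam> results = {
          {"result0", {Location::kRegister, 0}},
          {"result1", {Location::kStackSlot, 2}},
      };
      for (const PushParam& p : results) {
        if (p.location.kind == Location::kRegister) {
          std::printf("%s is returned in register r%d\n", p.node, p.location.index);
        } else {
          // Stack-allocated results are why the selector now also has to
          // "prepare results" after emitting the call instruction.
          std::printf("%s is returned in stack slot %d\n", p.node, p.location.index);
        }
      }
      return 0;
    }
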
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index b1b322e1ee..f335177b95 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -927,10 +927,10 @@ void InstructionSequence::MarkAsRepresentation(MachineRepresentation rep,
int InstructionSequence::AddDeoptimizationEntry(
FrameStateDescriptor* descriptor, DeoptimizeKind kind,
- DeoptimizeReason reason) {
+ DeoptimizeReason reason, VectorSlotPair const& feedback) {
int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
deoptimization_entries_.push_back(
- DeoptimizationEntry(descriptor, kind, reason));
+ DeoptimizationEntry(descriptor, kind, reason, feedback));
return deoptimization_id;
}
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index b0f6661274..7772f18ad9 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -1317,17 +1317,22 @@ class DeoptimizationEntry final {
public:
DeoptimizationEntry() {}
DeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeKind kind,
- DeoptimizeReason reason)
- : descriptor_(descriptor), kind_(kind), reason_(reason) {}
+ DeoptimizeReason reason, VectorSlotPair const& feedback)
+ : descriptor_(descriptor),
+ kind_(kind),
+ reason_(reason),
+ feedback_(feedback) {}
FrameStateDescriptor* descriptor() const { return descriptor_; }
DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
private:
FrameStateDescriptor* descriptor_ = nullptr;
DeoptimizeKind kind_ = DeoptimizeKind::kEager;
- DeoptimizeReason reason_ = DeoptimizeReason::kNoReason;
+ DeoptimizeReason reason_ = DeoptimizeReason::kUnknown;
+ VectorSlotPair feedback_ = VectorSlotPair();
};
typedef ZoneVector<DeoptimizationEntry> DeoptimizationVector;
@@ -1586,7 +1591,8 @@ class V8_EXPORT_PRIVATE InstructionSequence final
}
int AddDeoptimizationEntry(FrameStateDescriptor* descriptor,
- DeoptimizeKind kind, DeoptimizeReason reason);
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback);
DeoptimizationEntry const& GetDeoptimizationEntry(int deoptimization_id);
int GetDeoptimizationEntryCount() const {
return static_cast<int>(deoptimization_entries_.size());
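
The VectorSlotPair threaded through AddDeoptimizationEntry above lets a deoptimization record which feedback slot motivated the failed speculation, so that slot can be updated when the deopt fires. A toy sketch of what the entry carries; the types below are illustrative placeholders, not the real VectorSlotPair or DeoptimizationEntry.

    #include <cstdio>
    #include <vector>

    enum class DeoptReason { kWrongMap, kNotASmi };

    struct FeedbackSlot {
      int vector_id;  // which feedback vector
      int slot;       // which slot inside it; -1 means "no feedback attached"
    };

    struct DeoptEntry {
      DeoptReason reason;
      FeedbackSlot feedback;
    };

    int main() {
      std::vector<DeoptEntry> entries = {
          {DeoptReason::kWrongMap, {7, 3}},
          {DeoptReason::kNotASmi, {7, -1}},  // speculation without a known slot
      };
      for (const DeoptEntry& e : entries) {
        if (e.feedback.slot >= 0) {
          std::printf("deopt (reason %d): update vector %d, slot %d\n",
                      static_cast<int>(e.reason), e.feedback.vector_id,
                      e.feedback.slot);
        } else {
          std::printf("deopt (reason %d): no feedback to update\n",
                      static_cast<int>(e.reason));
        }
      }
      return 0;
    }
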
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 042d9e0ef7..940f0904b3 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -316,9 +316,10 @@ void Int64Lowering::LowerNode(Node* node) {
case IrOpcode::kTailCall: {
CallDescriptor* descriptor =
const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
- if (DefaultLowering(node) ||
- (descriptor->ReturnCount() == 1 &&
- descriptor->GetReturnType(0) == MachineType::Int64())) {
+ bool returns_require_lowering =
+ GetReturnCountAfterLowering(descriptor) !=
+ static_cast<int>(descriptor->ReturnCount());
+ if (DefaultLowering(node) || returns_require_lowering) {
// Tail calls do not have return values, so adjusting the call
// descriptor is enough.
auto new_descriptor = GetI32WasmCallDescriptor(zone(), descriptor);
@@ -688,7 +689,7 @@ void Int64Lowering::LowerNode(Node* node) {
Int32Matcher m(shift);
if (m.HasValue()) {
// Precondition: 0 <= shift < 64.
- int32_t shift_value = m.Value() & 0x3f;
+ int32_t shift_value = m.Value() & 0x3F;
if (shift_value == 0) {
ReplaceNode(node, GetReplacementLow(input),
GetReplacementHigh(input));
@@ -705,7 +706,7 @@ void Int64Lowering::LowerNode(Node* node) {
low_input = GetReplacementHigh(input);
high_input = GetReplacementLow(input);
}
- int32_t masked_shift_value = shift_value & 0x1f;
+ int32_t masked_shift_value = shift_value & 0x1F;
Node* masked_shift =
graph()->NewNode(common()->Int32Constant(masked_shift_value));
Node* inv_shift = graph()->NewNode(
@@ -726,7 +727,7 @@ void Int64Lowering::LowerNode(Node* node) {
if (!machine()->Word32ShiftIsSafe()) {
safe_shift =
graph()->NewNode(machine()->Word32And(), shift,
- graph()->NewNode(common()->Int32Constant(0x1f)));
+ graph()->NewNode(common()->Int32Constant(0x1F)));
}
// By creating this bit-mask with SAR and SHL we do not have to deal
@@ -750,7 +751,7 @@ void Int64Lowering::LowerNode(Node* node) {
if (machine()->Word32ShiftIsSafe()) {
masked_shift6 =
graph()->NewNode(machine()->Word32And(), shift,
- graph()->NewNode(common()->Int32Constant(0x3f)));
+ graph()->NewNode(common()->Int32Constant(0x3F)));
}
Diamond lt32(
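
The shift-lowering hunks above mask the 64-bit shift amount with 0x3F and the per-word amount with 0x1F. For context, here is a small self-contained version of the same word-pair arithmetic, checked against a native 64-bit shift; it sketches the idea only, while the real lowering builds graph nodes and also handles the variable-shift Diamond case.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    void Shl64ViaWords(uint32_t low, uint32_t high, int shift, uint32_t* out_low,
                       uint32_t* out_high) {
      shift &= 0x3F;  // a 64-bit shift only looks at the low 6 bits
      if (shift == 0) {
        *out_low = low;
        *out_high = high;
      } else if (shift < 32) {
        int masked = shift & 0x1F;
        *out_low = low << masked;
        *out_high = (high << masked) | (low >> (32 - masked));
      } else {
        // Shifting by >= 32: the low word moves entirely into the high word.
        int masked = shift & 0x1F;
        *out_low = 0;
        *out_high = low << masked;
      }
    }

    int main() {
      uint64_t value = 0x0123456789ABCDEFull;
      for (int shift : {0, 1, 13, 31, 32, 33, 63}) {
        uint32_t low, high;
        Shl64ViaWords(static_cast<uint32_t>(value),
                      static_cast<uint32_t>(value >> 32), shift, &low, &high);
        uint64_t recombined = (static_cast<uint64_t>(high) << 32) | low;
        assert(recombined == value << shift);
        std::printf("shift %2d -> %016llx\n", shift,
                    static_cast<unsigned long long>(recombined));
      }
      return 0;
    }
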
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index df6fdba3f0..7ff2bf6d5e 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -109,49 +109,22 @@ JSBuiltinReducer::JSBuiltinReducer(Editor* editor, JSGraph* jsgraph,
namespace {
-MaybeHandle<Map> GetMapWitness(Node* node) {
+Maybe<InstanceType> GetInstanceTypeWitness(Node* node) {
ZoneHandleSet<Map> maps;
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
NodeProperties::InferReceiverMapsResult result =
NodeProperties::InferReceiverMaps(receiver, effect, &maps);
- if (result == NodeProperties::kReliableReceiverMaps && maps.size() == 1) {
- return maps[0];
- }
- return MaybeHandle<Map>();
-}
-// TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
-bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map) {
- DCHECK(!jsarray_map->is_dictionary_map());
- Isolate* isolate = jsarray_map->GetIsolate();
- Handle<Name> length_string = isolate->factory()->length_string();
- DescriptorArray* descriptors = jsarray_map->instance_descriptors();
- int number =
- descriptors->SearchWithCache(isolate, *length_string, *jsarray_map);
- DCHECK_NE(DescriptorArray::kNotFound, number);
- return descriptors->GetDetails(number).IsReadOnly();
-}
+ if (result == NodeProperties::kNoReceiverMaps || maps.size() == 0) {
+ return Nothing<InstanceType>();
+ }
-// TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
-bool CanInlineArrayResizeOperation(Handle<Map> receiver_map) {
- Isolate* const isolate = receiver_map->GetIsolate();
- if (!receiver_map->prototype()->IsJSArray()) return false;
- Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
- isolate);
- // Ensure that all prototypes of the {receiver} are stable.
- for (PrototypeIterator it(isolate, receiver_prototype, kStartAtReceiver);
- !it.IsAtEnd(); it.Advance()) {
- Handle<JSReceiver> current = PrototypeIterator::GetCurrent<JSReceiver>(it);
- if (!current->map()->is_stable()) return false;
+ InstanceType first_type = maps[0]->instance_type();
+ for (const Handle<Map>& map : maps) {
+ if (map->instance_type() != first_type) return Nothing<InstanceType>();
}
- return receiver_map->instance_type() == JS_ARRAY_TYPE &&
- IsFastElementsKind(receiver_map->elements_kind()) &&
- !receiver_map->is_dictionary_map() && receiver_map->is_extensible() &&
- (!receiver_map->is_prototype_map() || receiver_map->is_stable()) &&
- isolate->IsNoElementsProtectorIntact() &&
- isolate->IsAnyInitialArrayPrototype(receiver_prototype) &&
- !IsReadOnlyLengthDescriptor(receiver_map);
+ return Just(first_type);
}
bool CanInlineJSArrayIteration(Handle<Map> receiver_map) {
@@ -189,7 +162,7 @@ bool CanInlineJSArrayIteration(Handle<Map> receiver_map) {
Reduction JSBuiltinReducer::ReduceArrayIterator(Node* node,
IterationKind kind) {
Handle<Map> receiver_map;
- if (GetMapWitness(node).ToHandle(&receiver_map)) {
+ if (NodeProperties::GetMapWitness(node).ToHandle(&receiver_map)) {
return ReduceArrayIterator(receiver_map, node, kind,
ArrayIteratorKind::kArray);
}
@@ -199,7 +172,7 @@ Reduction JSBuiltinReducer::ReduceArrayIterator(Node* node,
Reduction JSBuiltinReducer::ReduceTypedArrayIterator(Node* node,
IterationKind kind) {
Handle<Map> receiver_map;
- if (GetMapWitness(node).ToHandle(&receiver_map) &&
+ if (NodeProperties::GetMapWitness(node).ToHandle(&receiver_map) &&
receiver_map->instance_type() == JS_TYPED_ARRAY_TYPE) {
return ReduceArrayIterator(receiver_map, node, kind,
ArrayIteratorKind::kTypedArray);
@@ -313,8 +286,9 @@ Reduction JSBuiltinReducer::ReduceArrayIterator(Handle<Map> receiver_map,
return Replace(value);
}
-Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(
- Handle<Map> iterator_map, Node* node, IterationKind kind) {
+Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(InstanceType type,
+ Node* node,
+ IterationKind kind) {
Node* iterator = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -327,8 +301,8 @@ Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(
return NoChange();
}
- ElementsKind elements_kind = JSArrayIterator::ElementsKindForInstanceType(
- iterator_map->instance_type());
+ ElementsKind elements_kind =
+ JSArrayIterator::ElementsKindForInstanceType(type);
if (IsHoleyElementsKind(elements_kind)) {
if (!isolate()->IsNoElementsProtectorIntact()) {
@@ -484,15 +458,16 @@ Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(
return Replace(value);
}
-Reduction JSBuiltinReducer::ReduceTypedArrayIteratorNext(
- Handle<Map> iterator_map, Node* node, IterationKind kind) {
+Reduction JSBuiltinReducer::ReduceTypedArrayIteratorNext(InstanceType type,
+ Node* node,
+ IterationKind kind) {
Node* iterator = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- ElementsKind elements_kind = JSArrayIterator::ElementsKindForInstanceType(
- iterator_map->instance_type());
+ ElementsKind elements_kind =
+ JSArrayIterator::ElementsKindForInstanceType(type);
Node* array = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayIteratorObject()),
@@ -725,65 +700,58 @@ Reduction JSBuiltinReducer::ReduceTypedArrayToStringTag(Node* node) {
}
Reduction JSBuiltinReducer::ReduceArrayIteratorNext(Node* node) {
- Handle<Map> receiver_map;
- if (GetMapWitness(node).ToHandle(&receiver_map)) {
- switch (receiver_map->instance_type()) {
- case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
- return ReduceTypedArrayIteratorNext(receiver_map, node,
- IterationKind::kKeys);
-
- case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
- return ReduceFastArrayIteratorNext(receiver_map, node,
- IterationKind::kKeys);
-
- case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- return ReduceTypedArrayIteratorNext(receiver_map, node,
- IterationKind::kEntries);
-
- case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- return ReduceFastArrayIteratorNext(receiver_map, node,
- IterationKind::kEntries);
-
- case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
- return ReduceTypedArrayIteratorNext(receiver_map, node,
- IterationKind::kValues);
-
- case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
- return ReduceFastArrayIteratorNext(receiver_map, node,
- IterationKind::kValues);
-
- default:
- // Slow array iterators are not reduced
- return NoChange();
- }
+ Maybe<InstanceType> maybe_type = GetInstanceTypeWitness(node);
+ if (!maybe_type.IsJust()) return NoChange();
+ InstanceType type = maybe_type.FromJust();
+ switch (type) {
+ case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+ return ReduceTypedArrayIteratorNext(type, node, IterationKind::kKeys);
+
+ case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+ return ReduceFastArrayIteratorNext(type, node, IterationKind::kKeys);
+
+ case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ return ReduceTypedArrayIteratorNext(type, node, IterationKind::kEntries);
+
+ case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ return ReduceFastArrayIteratorNext(type, node, IterationKind::kEntries);
+
+ case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+ return ReduceTypedArrayIteratorNext(type, node, IterationKind::kValues);
+
+ case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ return ReduceFastArrayIteratorNext(type, node, IterationKind::kValues);
+
+ default:
+ // Slow array iterators are not reduced
+ return NoChange();
}
- return NoChange();
}
// ES6 section 22.1.2.2 Array.isArray ( arg )
@@ -896,398 +864,6 @@ Reduction JSBuiltinReducer::ReduceArrayIsArray(Node* node) {
return Replace(value);
}
-// ES6 section 22.1.3.17 Array.prototype.pop ( )
-Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
- Handle<Map> receiver_map;
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- // TODO(turbofan): Extend this to also handle fast holey double elements
- // once we got the hole NaN mess sorted out in TurboFan/V8.
- if (GetMapWitness(node).ToHandle(&receiver_map) &&
- CanInlineArrayResizeOperation(receiver_map) &&
- receiver_map->elements_kind() != HOLEY_DOUBLE_ELEMENTS) {
- // Install code dependencies on the {receiver} prototype maps and the
- // global array protector cell.
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
- dependencies()->AssumePrototypeMapsStable(receiver_map);
-
- // Load the "length" property of the {receiver}.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, effect, control);
-
- // Check if the {receiver} has any elements.
- Node* check = graph()->NewNode(simplified()->NumberEqual(), length,
- jsgraph()->ZeroConstant());
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = jsgraph()->UndefinedConstant();
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- // TODO(tebbi): We should trim the backing store if the capacity is too
- // big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
-
- // Load the elements backing store from the {receiver}.
- Node* elements = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- receiver, efalse, if_false);
-
- // Ensure that we aren't popping from a copy-on-write backing store.
- if (IsSmiOrObjectElementsKind(receiver_map->elements_kind())) {
- elements = efalse =
- graph()->NewNode(simplified()->EnsureWritableFastElements(),
- receiver, elements, efalse, if_false);
- }
-
- // Compute the new {length}.
- length = graph()->NewNode(simplified()->NumberSubtract(), length,
- jsgraph()->OneConstant());
-
- // Store the new {length} to the {receiver}.
- efalse = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, length, efalse, if_false);
-
- // Load the last entry from the {elements}.
- vfalse = efalse = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(
- receiver_map->elements_kind())),
- elements, length, efalse, if_false);
-
- // Store a hole to the element we just removed from the {receiver}.
- efalse = graph()->NewNode(
- simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(
- GetHoleyElementsKind(receiver_map->elements_kind()))),
- elements, length, jsgraph()->TheHoleConstant(), efalse, if_false);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
-
- // Convert the hole to undefined. Do this last, so that we can optimize
- // conversion operator via some smart strength reduction in many cases.
- if (IsHoleyElementsKind(receiver_map->elements_kind())) {
- value =
- graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
- }
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 22.1.3.18 Array.prototype.push ( )
-Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
- int const num_values = node->op()->ValueInputCount() - 2;
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- if (receiver_maps.size() != 1) return NoChange();
- DCHECK_NE(NodeProperties::kNoReceiverMaps, result);
-
- // TODO(turbofan): Relax this to deal with multiple {receiver} maps.
- Handle<Map> receiver_map = receiver_maps[0];
- if (CanInlineArrayResizeOperation(receiver_map)) {
- // Collect the value inputs to push.
- std::vector<Node*> values(num_values);
- for (int i = 0; i < num_values; ++i) {
- values[i] = NodeProperties::GetValueInput(node, 2 + i);
- }
-
- // Install code dependencies on the {receiver} prototype maps and the
- // global array protector cell.
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
- dependencies()->AssumePrototypeMapsStable(receiver_map);
-
- // If the {receiver_maps} information is not reliable, we need
- // to check that the {receiver} still has one of these maps.
- if (result == NodeProperties::kUnreliableReceiverMaps) {
- if (receiver_map->is_stable()) {
- dependencies()->AssumeMapStable(receiver_map);
- } else {
- // TODO(turbofan): This is a potential - yet unlikely - deoptimization
- // loop, since we might not learn from this deoptimization in baseline
- // code. We need a way to learn from deoptimizations in optimized to
- // address these problems.
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps),
- receiver, effect, control);
- }
- }
-
- // TODO(turbofan): Perform type checks on the {values}. We are not
- // guaranteed to learn from these checks in case they fail, as the witness
- // (i.e. the map check from the LoadIC for a.push) might not be executed in
- // baseline code (after we stored the value in the builtin and thereby
- // changed the elements kind of a) before be decide to optimize this
- // function again. We currently don't have a proper way to deal with this;
- // the proper solution here is to learn on deopt, i.e. disable
- // Array.prototype.push inlining for this function.
- for (auto& value : values) {
- if (IsSmiElementsKind(receiver_map->elements_kind())) {
- value = effect =
- graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
- } else if (IsDoubleElementsKind(receiver_map->elements_kind())) {
- value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
- effect, control);
- // Make sure we do not store signaling NaNs into double arrays.
- value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
- }
- }
-
- // Load the "length" property of the {receiver}.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, effect, control);
- Node* value = length;
-
- // Check if we have any {values} to push.
- if (num_values > 0) {
- // Compute the resulting "length" of the {receiver}.
- Node* new_length = value = graph()->NewNode(
- simplified()->NumberAdd(), length, jsgraph()->Constant(num_values));
-
- // Load the elements backing store of the {receiver}.
- Node* elements = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- receiver, effect, control);
- Node* elements_length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
- elements, effect, control);
-
- // TODO(turbofan): Check if we need to grow the {elements} backing store.
- // This will deopt if we cannot grow the array further, and we currently
- // don't necessarily learn from it. See the comment on the value type
- // check above.
- GrowFastElementsMode mode =
- IsDoubleElementsKind(receiver_map->elements_kind())
- ? GrowFastElementsMode::kDoubleElements
- : GrowFastElementsMode::kSmiOrObjectElements;
- elements = effect = graph()->NewNode(
- simplified()->MaybeGrowFastElements(mode), receiver, elements,
- graph()->NewNode(simplified()->NumberAdd(), length,
- jsgraph()->Constant(num_values - 1)),
- elements_length, effect, control);
-
- // Update the JSArray::length field. Since this is observable,
- // there must be no other check after this.
- effect = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, new_length, effect, control);
-
- // Append the {values} to the {elements}.
- for (int i = 0; i < num_values; ++i) {
- Node* value = values[i];
- Node* index = graph()->NewNode(simplified()->NumberAdd(), length,
- jsgraph()->Constant(i));
- effect = graph()->NewNode(
- simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(
- receiver_map->elements_kind())),
- elements, index, value, effect, control);
- }
- }
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 22.1.3.22 Array.prototype.shift ( )
-Reduction JSBuiltinReducer::ReduceArrayShift(Node* node) {
- Node* target = NodeProperties::GetValueInput(node, 0);
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // TODO(turbofan): Extend this to also handle fast holey double elements
- // once we got the hole NaN mess sorted out in TurboFan/V8.
- Handle<Map> receiver_map;
- if (GetMapWitness(node).ToHandle(&receiver_map) &&
- CanInlineArrayResizeOperation(receiver_map) &&
- receiver_map->elements_kind() != HOLEY_DOUBLE_ELEMENTS) {
- // Install code dependencies on the {receiver} prototype maps and the
- // global array protector cell.
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
- dependencies()->AssumePrototypeMapsStable(receiver_map);
-
- // Load length of the {receiver}.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, effect, control);
-
- // Return undefined if {receiver} has no elements.
- Node* check0 = graph()->NewNode(simplified()->NumberEqual(), length,
- jsgraph()->ZeroConstant());
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0 = jsgraph()->UndefinedConstant();
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0;
- {
- // Check if we should take the fast-path.
- Node* check1 =
- graph()->NewNode(simplified()->NumberLessThanOrEqual(), length,
- jsgraph()->Constant(JSArray::kMaxCopyElements));
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check1, if_false0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = efalse0;
- Node* vtrue1;
- {
- Node* elements = etrue1 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- receiver, etrue1, if_true1);
-
- // Load the first element here, which we return below.
- vtrue1 = etrue1 = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(
- receiver_map->elements_kind())),
- elements, jsgraph()->ZeroConstant(), etrue1, if_true1);
-
- // Ensure that we aren't shifting a copy-on-write backing store.
- if (IsSmiOrObjectElementsKind(receiver_map->elements_kind())) {
- elements = etrue1 =
- graph()->NewNode(simplified()->EnsureWritableFastElements(),
- receiver, elements, etrue1, if_true1);
- }
-
- // Shift the remaining {elements} by one towards the start.
- Node* loop = graph()->NewNode(common()->Loop(2), if_true1, if_true1);
- Node* eloop =
- graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop);
- Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
- NodeProperties::MergeControlToEnd(graph(), common(), terminate);
- Node* index = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2),
- jsgraph()->OneConstant(),
- jsgraph()->Constant(JSArray::kMaxCopyElements - 1), loop);
-
- {
- Node* check2 =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch2 = graph()->NewNode(common()->Branch(), check2, loop);
-
- if_true1 = graph()->NewNode(common()->IfFalse(), branch2);
- etrue1 = eloop;
-
- Node* control = graph()->NewNode(common()->IfTrue(), branch2);
- Node* effect = etrue1;
-
- ElementAccess const access = AccessBuilder::ForFixedArrayElement(
- receiver_map->elements_kind());
- Node* value = effect =
- graph()->NewNode(simplified()->LoadElement(access), elements,
- index, effect, control);
- effect = graph()->NewNode(
- simplified()->StoreElement(access), elements,
- graph()->NewNode(simplified()->NumberSubtract(), index,
- jsgraph()->OneConstant()),
- value, effect, control);
-
- loop->ReplaceInput(1, control);
- eloop->ReplaceInput(1, effect);
- index->ReplaceInput(1,
- graph()->NewNode(simplified()->NumberAdd(), index,
- jsgraph()->OneConstant()));
- }
-
- // Compute the new {length}.
- length = graph()->NewNode(simplified()->NumberSubtract(), length,
- jsgraph()->OneConstant());
-
- // Store the new {length} to the {receiver}.
- etrue1 = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, length, etrue1, if_true1);
-
- // Store a hole to the element we just removed from the {receiver}.
- etrue1 = graph()->NewNode(
- simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(
- GetHoleyElementsKind(receiver_map->elements_kind()))),
- elements, length, jsgraph()->TheHoleConstant(), etrue1, if_true1);
- }
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = efalse0;
- Node* vfalse1;
- {
- // Call the generic C++ implementation.
- const int builtin_index = Builtins::kArrayShift;
- CallDescriptor const* const desc = Linkage::GetCEntryStubCallDescriptor(
- graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver,
- Builtins::name(builtin_index), node->op()->properties(),
- CallDescriptor::kNeedsFrameState);
- Node* stub_code = jsgraph()->CEntryStubConstant(1, kDontSaveFPRegs,
- kArgvOnStack, true);
- Address builtin_entry = Builtins::CppEntryOf(builtin_index);
- Node* entry = jsgraph()->ExternalConstant(
- ExternalReference(builtin_entry, isolate()));
- Node* argc =
- jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver);
- if_false1 = efalse1 = vfalse1 =
- graph()->NewNode(common()->Call(desc), stub_code, receiver,
- jsgraph()->PaddingConstant(), argc, target,
- jsgraph()->UndefinedConstant(), entry, argc,
- context, frame_state, efalse1, if_false1);
- }
-
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- efalse0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
- vfalse0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue1, vfalse1, if_false0);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue0, vfalse0, control);
-
- // Convert the hole to undefined. Do this last, so that we can optimize
- // conversion operator via some smart strength reduction in many cases.
- if (IsHoleyElementsKind(receiver_map->elements_kind())) {
- value =
- graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
- }
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- return NoChange();
-}
-
namespace {
bool HasInstanceTypeWitness(Node* receiver, Node* effect,
@@ -1451,6 +1027,7 @@ Reduction JSBuiltinReducer::ReduceCollectionIteratorNext(
index = effect = graph()->NewNode(
common()->Call(desc), jsgraph()->HeapConstant(callable.code()), table,
index, jsgraph()->NoContextConstant(), effect);
+ NodeProperties::SetType(index, type_cache_.kFixedArrayLengthType);
// Update the {index} and {table} on the {receiver}.
effect = graph()->NewNode(
@@ -1562,8 +1139,9 @@ Reduction JSBuiltinReducer::ReduceCollectionIteratorNext(
// Abort loop with resulting value.
Node* control = graph()->NewNode(common()->IfFalse(), branch1);
Node* effect = etrue0;
- Node* value = graph()->NewNode(
- common()->TypeGuard(Type::NonInternal()), entry_key, control);
+ Node* value = effect =
+ graph()->NewNode(common()->TypeGuard(Type::NonInternal()),
+ entry_key, effect, control);
Node* done = jsgraph()->FalseConstant();
// Advance the index on the {receiver}.
@@ -2369,122 +1947,6 @@ Node* GetStringWitness(Node* node) {
} // namespace
-// ES6 section 21.1.3.1 String.prototype.charAt ( pos )
-Reduction JSBuiltinReducer::ReduceStringCharAt(Node* node) {
- // We need at least target, receiver and index parameters.
- if (node->op()->ValueInputCount() >= 3) {
- Node* index = NodeProperties::GetValueInput(node, 2);
- Type* index_type = NodeProperties::GetType(index);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- if (index_type->Is(Type::Integral32OrMinusZeroOrNaN())) {
- if (Node* receiver = GetStringWitness(node)) {
- if (!index_type->Is(Type::Unsigned32())) {
- // Map -0 and NaN to 0 (as per ToInteger), and the values in
- // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
- // be considered out-of-bounds as well, because of the maximal
- // String length limit in V8.
- STATIC_ASSERT(String::kMaxLength <= kMaxInt);
- index = graph()->NewNode(simplified()->NumberToUint32(), index);
- }
-
- // Determine the {receiver} length.
- Node* receiver_length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
- effect, control);
-
- // Check if {index} is less than {receiver} length.
- Node* check = graph()->NewNode(simplified()->NumberLessThan(), index,
- receiver_length);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, control);
-
- // Return the character from the {receiver} as single character string.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-
- Node* masked_index = graph()->NewNode(
- simplified()->MaskIndexWithBound(), index, receiver_length);
-
- Node* vtrue = graph()->NewNode(simplified()->StringCharAt(), receiver,
- masked_index, if_true);
-
- // Return the empty string otherwise.
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = jsgraph()->EmptyStringConstant();
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- }
- }
-
- return NoChange();
-}
-
-// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
-Reduction JSBuiltinReducer::ReduceStringCharCodeAt(Node* node) {
- // We need at least target, receiver and index parameters.
- if (node->op()->ValueInputCount() >= 3) {
- Node* index = NodeProperties::GetValueInput(node, 2);
- Type* index_type = NodeProperties::GetType(index);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- if (index_type->Is(Type::Integral32OrMinusZeroOrNaN())) {
- if (Node* receiver = GetStringWitness(node)) {
- if (!index_type->Is(Type::Unsigned32())) {
- // Map -0 and NaN to 0 (as per ToInteger), and the values in
- // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
- // be considered out-of-bounds as well, because of the maximal
- // String length limit in V8.
- STATIC_ASSERT(String::kMaxLength <= kMaxInt);
- index = graph()->NewNode(simplified()->NumberToUint32(), index);
- }
-
- // Determine the {receiver} length.
- Node* receiver_length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
- effect, control);
-
- // Check if {index} is less than {receiver} length.
- Node* check = graph()->NewNode(simplified()->NumberLessThan(), index,
- receiver_length);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, control);
-
- // Load the character from the {receiver}.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-
- Node* masked_index = graph()->NewNode(
- simplified()->MaskIndexWithBound(), index, receiver_length);
-
- Node* vtrue = graph()->NewNode(simplified()->StringCharCodeAt(),
- receiver, masked_index, if_true);
-
- // Return NaN otherwise.
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = jsgraph()->NaNConstant();
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- }
- }
-
- return NoChange();
-}
-
// ES6 String.prototype.concat(...args)
// #sec-string.prototype.concat
Reduction JSBuiltinReducer::ReduceStringConcat(Node* node) {
@@ -2516,34 +1978,6 @@ Reduction JSBuiltinReducer::ReduceStringConcat(Node* node) {
return NoChange();
}
-// ES6 String.prototype.indexOf(searchString [, position])
-// #sec-string.prototype.indexof
-Reduction JSBuiltinReducer::ReduceStringIndexOf(Node* node) {
- // We need at least target, receiver and search_string parameters.
- if (node->op()->ValueInputCount() >= 3) {
- Node* search_string = NodeProperties::GetValueInput(node, 2);
- Type* search_string_type = NodeProperties::GetType(search_string);
- Node* position = (node->op()->ValueInputCount() >= 4)
- ? NodeProperties::GetValueInput(node, 3)
- : jsgraph()->ZeroConstant();
- Type* position_type = NodeProperties::GetType(position);
-
- if (search_string_type->Is(Type::String()) &&
- position_type->Is(Type::SignedSmall())) {
- if (Node* receiver = GetStringWitness(node)) {
- RelaxEffectsAndControls(node);
- node->ReplaceInput(0, receiver);
- node->ReplaceInput(1, search_string);
- node->ReplaceInput(2, position);
- node->TrimInputCount(3);
- NodeProperties::ChangeOp(node, simplified()->StringIndexOf());
- return Changed(node);
- }
- }
- }
- return NoChange();
-}
-
Reduction JSBuiltinReducer::ReduceStringIterator(Node* node) {
if (Node* receiver = GetStringWitness(node)) {
Node* effect = NodeProperties::GetEffectInput(node);
@@ -2584,9 +2018,7 @@ Reduction JSBuiltinReducer::ReduceStringIteratorNext(Node* node) {
Node* index = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSStringIteratorIndex()),
receiver, effect, control);
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), string,
- effect, control);
+ Node* length = graph()->NewNode(simplified()->StringLength(), string);
// branch0: if (index < length)
Node* check0 =
@@ -2677,9 +2109,8 @@ Reduction JSBuiltinReducer::ReduceStringIteratorNext(Node* node) {
simplified()->StringFromCodePoint(UnicodeEncoding::UTF16), vtrue0);
// Update iterator.[[NextIndex]]
- Node* char_length = etrue0 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), vtrue0,
- etrue0, if_true0);
+ Node* char_length =
+ graph()->NewNode(simplified()->StringLength(), vtrue0);
index = graph()->NewNode(simplified()->NumberAdd(), index, char_length);
etrue0 = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForJSStringIteratorIndex()),
@@ -2728,9 +2159,8 @@ Reduction JSBuiltinReducer::ReduceStringSlice(Node* node) {
if (start_type->Is(type_cache_.kSingletonMinusOne) &&
end_type->Is(Type::Undefined())) {
- Node* receiver_length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
- effect, control);
+ Node* receiver_length =
+ graph()->NewNode(simplified()->StringLength(), receiver);
Node* check =
graph()->NewNode(simplified()->NumberEqual(), receiver_length,
@@ -2855,12 +2285,6 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceArrayIteratorNext(node);
case kArrayIsArray:
return ReduceArrayIsArray(node);
- case kArrayPop:
- return ReduceArrayPop(node);
- case kArrayPush:
- return ReduceArrayPush(node);
- case kArrayShift:
- return ReduceArrayShift(node);
case kDateNow:
return ReduceDateNow(node);
case kDateGetTime:
@@ -3024,14 +2448,8 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
case kStringFromCharCode:
reduction = ReduceStringFromCharCode(node);
break;
- case kStringCharAt:
- return ReduceStringCharAt(node);
- case kStringCharCodeAt:
- return ReduceStringCharCodeAt(node);
case kStringConcat:
return ReduceStringConcat(node);
- case kStringIndexOf:
- return ReduceStringIndexOf(node);
case kStringIterator:
return ReduceStringIterator(node);
case kStringIteratorNext:
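
ReduceArrayIteratorNext above now keys off a single InstanceType that every inferred receiver map agrees on, instead of requiring exactly one reliable map. A toy version of that witness check follows; the Map and InstanceType types here are placeholders, not the real NodeProperties API.

    #include <cstdio>
    #include <vector>

    enum class InstanceType { kFastArrayValueIterator, kTypedArrayValueIterator };

    struct Map {
      InstanceType instance_type;
    };

    bool InstanceTypeWitness(const std::vector<Map>& maps, InstanceType* out) {
      if (maps.empty()) return false;  // no reliable information at all
      InstanceType first = maps[0].instance_type;
      for (const Map& map : maps) {
        if (map.instance_type != first) return false;  // mixed types: give up
      }
      *out = first;
      return true;
    }

    int main() {
      std::vector<Map> monomorphic = {{InstanceType::kFastArrayValueIterator},
                                      {InstanceType::kFastArrayValueIterator}};
      std::vector<Map> mixed = {{InstanceType::kFastArrayValueIterator},
                                {InstanceType::kTypedArrayValueIterator}};
      InstanceType type;
      std::printf("monomorphic witness: %s\n",
                  InstanceTypeWitness(monomorphic, &type) ? "yes" : "no");
      std::printf("mixed witness: %s\n",
                  InstanceTypeWitness(mixed, &type) ? "yes" : "no");
      return 0;
    }
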
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index 2b22b0ce7c..b3c44c7a0f 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -47,15 +47,13 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
IterationKind kind,
ArrayIteratorKind iter_kind);
Reduction ReduceArrayIteratorNext(Node* node);
- Reduction ReduceFastArrayIteratorNext(Handle<Map> iterator_map, Node* node,
+ Reduction ReduceFastArrayIteratorNext(InstanceType type, Node* node,
IterationKind kind);
- Reduction ReduceTypedArrayIteratorNext(Handle<Map> iterator_map, Node* node,
+ Reduction ReduceTypedArrayIteratorNext(InstanceType type, Node* node,
IterationKind kind);
Reduction ReduceTypedArrayToStringTag(Node* node);
Reduction ReduceArrayIsArray(Node* node);
- Reduction ReduceArrayPop(Node* node);
- Reduction ReduceArrayPush(Node* node);
- Reduction ReduceArrayShift(Node* node);
+
Reduction ReduceCollectionIterator(Node* node,
InstanceType collection_instance_type,
int collection_iterator_map_index);
@@ -110,11 +108,8 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction ReduceNumberIsSafeInteger(Node* node);
Reduction ReduceNumberParseInt(Node* node);
Reduction ReduceObjectCreate(Node* node);
- Reduction ReduceStringCharAt(Node* node);
- Reduction ReduceStringCharCodeAt(Node* node);
Reduction ReduceStringConcat(Node* node);
Reduction ReduceStringFromCharCode(Node* node);
- Reduction ReduceStringIndexOf(Node* node);
Reduction ReduceStringIterator(Node* node);
Reduction ReduceStringIteratorNext(Node* node);
Reduction ReduceStringSlice(Node* node);
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index c595b360d5..1f8e7a2cef 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/js-call-reducer.h"
#include "src/api.h"
+#include "src/builtins/builtins-utils.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/compilation-dependencies.h"
@@ -17,6 +18,7 @@
#include "src/feedback-vector-inl.h"
#include "src/ic/call-optimization.h"
#include "src/objects-inl.h"
+#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -90,20 +92,6 @@ Reduction JSCallReducer::ReduceBooleanConstructor(Node* node) {
return Replace(value);
}
-// ES6 section 20.1.1 The Number Constructor
-Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
- CallParameters const& p = CallParametersOf(node->op());
-
- // Turn the {node} into a {JSToNumber} call.
- DCHECK_LE(2u, p.arity());
- Node* value = (p.arity() == 2) ? jsgraph()->ZeroConstant()
- : NodeProperties::GetValueInput(node, 2);
- NodeProperties::ReplaceValueInputs(node, value);
- NodeProperties::ChangeOp(node, javascript()->ToNumber());
- return Changed(node);
-}
-
// ES section #sec-object-constructor
Reduction JSCallReducer::ReduceObjectConstructor(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -549,7 +537,7 @@ Reduction JSCallReducer::ReduceObjectPrototypeHasOwnProperty(Node* node) {
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
receiver_map, cache_type);
effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kNoReason), check, effect,
+ simplified()->CheckIf(DeoptimizeReason::kWrongMap), check, effect,
control);
}
Node* value = jsgraph()->TrueConstant();
@@ -804,15 +792,37 @@ bool CanInlineArrayIteratingBuiltin(Handle<Map> receiver_map) {
isolate->IsAnyInitialArrayPrototype(receiver_prototype);
}
+Node* JSCallReducer::WireInLoopStart(Node* k, Node** control, Node** effect) {
+ Node* loop = *control =
+ graph()->NewNode(common()->Loop(2), *control, *control);
+ Node* eloop = *effect =
+ graph()->NewNode(common()->EffectPhi(2), *effect, *effect, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ return graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), k,
+ k, loop);
+}
+
+void JSCallReducer::WireInLoopEnd(Node* loop, Node* eloop, Node* vloop, Node* k,
+ Node* control, Node* effect) {
+ loop->ReplaceInput(1, control);
+ vloop->ReplaceInput(1, k);
+ eloop->ReplaceInput(1, effect);
+}
+
Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
Node* node) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- CallParameters const& p = CallParametersOf(node->op());
// Try to determine the {receiver} map.
Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -825,10 +835,193 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- if (result != NodeProperties::kReliableReceiverMaps) {
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ // By ensuring that {kind} is object or double, we can be polymorphic
+ // on different elements kinds.
+ ElementsKind kind = receiver_maps[0]->elements_kind();
+ if (IsSmiElementsKind(kind)) {
+ kind = FastSmiToObjectElementsKind(kind);
+ }
+ for (Handle<Map> receiver_map : receiver_maps) {
+ ElementsKind next_kind = receiver_map->elements_kind();
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
+ return NoChange();
+ }
+ if (!IsFastElementsKind(next_kind)) {
+ return NoChange();
+ }
+ if (IsDoubleElementsKind(kind) != IsDoubleElementsKind(next_kind)) {
+ return NoChange();
+ }
+ if (IsHoleyElementsKind(next_kind)) {
+ kind = GetHoleyElementsKind(kind);
+ }
+ }
+
+ // Install code dependencies on the {receiver} prototype maps and the
+ // global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ Node* k = jsgraph()->ZeroConstant();
+
+ Node* original_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ // Check whether the given callback function is callable. Note that this has
+ // to happen outside the loop to make sure we also throw on empty arrays.
+ Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayForEachLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ WireInCallbackIsCallableCheck(fncallback, context, check_frame_state, effect,
+ &control, &check_fail, &check_throw);
+
+ // Start the loop.
+ Node* vloop = k = WireInLoopStart(k, &control, &effect);
+ Node *loop = control, *eloop = effect;
+ checkpoint_params[3] = k;
+
+ Node* continue_test =
+ graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ continue_test, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ control = if_true;
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayForEachLoopEagerDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+
+  // Make sure the map hasn't changed during the iteration.
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+
+ Node* next_k =
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+ checkpoint_params[3] = next_k;
+
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+
+ if (IsHoleyElementsKind(kind)) {
+    // Holey elements kinds require a hole check and skipping of the element in
+ // the case of a hole.
+ Node* check;
+ if (IsDoubleElementsKind(kind)) {
+ check = graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
+ } else {
+ check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ }
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+
+ // The contract is that we don't leak "the hole" into "user JavaScript",
+ // so we must rename the {element} here to explicitly exclude "the hole"
+ // from the type of {element}.
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
+ }
+
+ frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayForEachLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+
+ control = effect = graph()->NewNode(
+ javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
+ receiver, context, frame_state, effect, control);
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
+ &check_fail, &control);
+ }
+
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_control = control;
+ Node* after_call_effect = effect;
+ control = hole_true;
+ effect = effect_true;
+
+ control = graph()->NewNode(common()->Merge(2), control, after_call_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
+ control);
+ }
+
+ WireInLoopEnd(loop, eloop, vloop, next_k, control, effect);
+
+ control = if_false;
+ effect = eloop;
+
+ // Wire up the branch for the case when IsCallable fails for the callback.
+ // Since {check_throw} is an unconditional throw, it's impossible to
+ // return a successful completion. Therefore, we simply connect the successful
+ // completion to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ ReplaceWithValue(node, jsgraph()->UndefinedConstant(), effect, control);
+ return Replace(jsgraph()->UndefinedConstant());
+}
+
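For orientation, a minimal sketch (not V8 code) of the JavaScript-level loop that ReduceArrayForEach expresses in graph form: the callable check happens before the loop, iteration runs from 0 to the original length with the current length re-checked each step, holes are skipped for holey kinds, and the result is undefined. The ForEachLikeLoop name and the optional-as-hole encoding are illustrative only.

#include <functional>
#include <optional>
#include <vector>

using Element = std::optional<double>;  // nullopt stands in for "the hole"

void ForEachLikeLoop(std::vector<Element>& arr,
                     const std::function<void(double, size_t)>& callback) {
  const size_t original_length = arr.size();
  for (size_t k = 0; k < original_length; ++k) {
    // The callback may shrink the array, so the current length is re-checked
    // each iteration (SafeLoadElement expresses this with CheckBounds).
    if (k >= arr.size()) break;
    if (!arr[k]) continue;   // holey kinds: a hole skips the callback entirely
    callback(*arr[k], k);    // the callback is free to mutate arr
  }
}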
+Reduction JSCallReducer::ReduceArrayReduce(Handle<JSFunction> function,
+ Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
}
- if (receiver_maps.size() == 0) return NoChange();
+
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
ElementsKind kind = IsDoubleElementsKind(receiver_maps[0]->elements_kind())
? PACKED_DOUBLE_ELEMENTS
@@ -838,8 +1031,7 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
return NoChange();
}
- if (!IsFastElementsKind(next_kind) ||
- (IsDoubleElementsKind(next_kind) && IsHoleyElementsKind(next_kind))) {
+ if (!IsFastElementsKind(next_kind) || IsHoleyElementsKind(next_kind)) {
return NoChange();
}
if (IsDoubleElementsKind(kind) != IsDoubleElementsKind(next_kind)) {
@@ -854,36 +1046,73 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
// global array protector cell.
dependencies()->AssumePropertyCell(factory()->no_elements_protector());
- Node* k = jsgraph()->ZeroConstant();
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
Node* original_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
receiver, effect, control);
- std::vector<Node*> checkpoint_params(
- {receiver, fncallback, this_arg, k, original_length});
+ Node* k = jsgraph()->ZeroConstant();
+
+ std::vector<Node*> checkpoint_params({receiver, fncallback, k,
+ original_length,
+ jsgraph()->UndefinedConstant()});
const int stack_parameters = static_cast<int>(checkpoint_params.size());
// Check whether the given callback function is callable. Note that this has
// to happen outside the loop to make sure we also throw on empty arrays.
Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayForEachLoopLazyDeoptContinuation,
- node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ jsgraph(), function, Builtins::kArrayReduceLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
outer_frame_state, ContinuationFrameStateMode::LAZY);
Node* check_fail = nullptr;
Node* check_throw = nullptr;
WireInCallbackIsCallableCheck(fncallback, context, check_frame_state, effect,
&control, &check_fail, &check_throw);
+ // Set initial accumulator value
+ Node* cur = jsgraph()->TheHoleConstant();
+
+ Node* initial_element_check_fail = nullptr;
+ Node* initial_element_check_throw = nullptr;
+ if (node->op()->ValueInputCount() > 3) {
+ cur = NodeProperties::GetValueInput(node, 3);
+ } else {
+ Node* check =
+ graph()->NewNode(simplified()->NumberEqual(), original_length, k);
+ Node* check_branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ initial_element_check_fail =
+ graph()->NewNode(common()->IfTrue(), check_branch);
+ initial_element_check_throw = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
+ jsgraph()->Constant(MessageTemplate::kReduceNoInitial), fncallback,
+ context, check_frame_state, effect, initial_element_check_fail);
+ control = graph()->NewNode(common()->IfFalse(), check_branch);
+
+ cur = SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+ k = graph()->NewNode(simplified()->NumberAdd(), k,
+ jsgraph()->OneConstant());
+ }
+
// Start the loop.
Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
Node* eloop = effect =
graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
NodeProperties::MergeControlToEnd(graph(), common(), terminate);
- Node* vloop = k = graph()->NewNode(
+ Node* kloop = k = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
- checkpoint_params[3] = k;
+ Node* curloop = cur = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), cur, cur, loop);
+ checkpoint_params[2] = k;
+ checkpoint_params[4] = curloop;
control = loop;
effect = eloop;
@@ -898,7 +1127,7 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
control = if_true;
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayForEachLoopEagerDeoptContinuation,
+ jsgraph(), function, Builtins::kArrayReduceLoopEagerDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::EAGER);
@@ -910,11 +1139,12 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
effect, control);
- Node* element = SafeLoadElement(kind, receiver, control, &effect, &k);
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
Node* next_k =
- graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->Constant(1));
- checkpoint_params[3] = next_k;
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+ checkpoint_params[2] = next_k;
Node* hole_true = nullptr;
Node* hole_false = nullptr;
@@ -934,18 +1164,19 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
// The contract is that we don't leak "the hole" into "user JavaScript",
// so we must rename the {element} here to explicitly exclude "the hole"
// from the type of {element}.
- element = graph()->NewNode(common()->TypeGuard(Type::NonInternal()),
- element, control);
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
}
frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayForEachLoopLazyDeoptContinuation,
- node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ jsgraph(), function, Builtins::kArrayReduceLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
outer_frame_state, ContinuationFrameStateMode::LAZY);
- control = effect = graph()->NewNode(
- javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
- receiver, context, frame_state, effect, control);
+ Node* next_cur = control = effect =
+ graph()->NewNode(javascript()->Call(6, p.frequency()), fncallback,
+ jsgraph()->UndefinedConstant(), cur, element, k,
+ receiver, context, frame_state, effect, control);
// Rewire potential exception edges.
Node* on_exception = nullptr;
@@ -963,12 +1194,17 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
control = graph()->NewNode(common()->Merge(2), control, after_call_control);
effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
control);
+ next_cur =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), cur,
+ next_cur, control);
}
k = next_k;
+ cur = next_cur;
loop->ReplaceInput(1, control);
- vloop->ReplaceInput(1, k);
+ kloop->ReplaceInput(1, k);
+ curloop->ReplaceInput(1, cur);
eloop->ReplaceInput(1, effect);
control = if_false;
@@ -982,19 +1218,271 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
graph()->NewNode(common()->Throw(), check_throw, check_fail);
NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
- ReplaceWithValue(node, jsgraph()->UndefinedConstant(), effect, control);
- return Replace(jsgraph()->UndefinedConstant());
-}
+ if (node->op()->ValueInputCount() <= 3) {
+    // Wire up the branch for the case when the array is empty.
+    // Since {initial_element_check_throw} is an unconditional throw, it's
+    // impossible to return a successful completion. Therefore, we simply
+    // connect the successful completion to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), initial_element_check_throw,
+ initial_element_check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+ }
+
+ ReplaceWithValue(node, curloop, effect, control);
+ return Replace(curloop);
+}
+
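A brief illustrative sketch (not V8 code) of the accumulator handling ReduceArrayReduce wires up: without an initial value, an empty array throws a TypeError, otherwise element 0 seeds the accumulator and iteration starts at index 1. The ReduceLikeLoop name is hypothetical and the receiver argument of the JS callback is omitted for brevity.

#include <functional>
#include <stdexcept>
#include <vector>

double ReduceLikeLoop(const std::vector<double>& arr,
                      const std::function<double(double, double, size_t)>& callback) {
  size_t k = 0;
  if (arr.empty()) {
    // Corresponds to the MessageTemplate::kReduceNoInitial throw above.
    throw std::runtime_error("Reduce of empty array with no initial value");
  }
  double accumulator = arr[k];  // the SafeLoadElement before the loop seeds {cur}
  ++k;
  for (; k < arr.size(); ++k) {
    accumulator = callback(accumulator, arr[k], k);
  }
  return accumulator;
}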
+Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
+ Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ ElementsKind kind = IsDoubleElementsKind(receiver_maps[0]->elements_kind())
+ ? PACKED_DOUBLE_ELEMENTS
+ : PACKED_ELEMENTS;
+ for (Handle<Map> receiver_map : receiver_maps) {
+ ElementsKind next_kind = receiver_map->elements_kind();
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
+ return NoChange();
+ }
+ if (!IsFastElementsKind(next_kind) || IsHoleyElementsKind(next_kind)) {
+ return NoChange();
+ }
+ if (IsDoubleElementsKind(kind) != IsDoubleElementsKind(next_kind)) {
+ return NoChange();
+ }
+ if (IsHoleyElementsKind(next_kind)) {
+ kind = HOLEY_ELEMENTS;
+ }
+ }
+
+ // Install code dependencies on the {receiver} prototype maps and the
+ // global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ Node* original_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
+ receiver, effect, control);
+
+ Node* k = graph()->NewNode(simplified()->NumberSubtract(), original_length,
+ jsgraph()->OneConstant());
+
+ std::vector<Node*> checkpoint_params({receiver, fncallback, k,
+ original_length,
+ jsgraph()->UndefinedConstant()});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ // Check whether the given callback function is callable. Note that this has
+ // to happen outside the loop to make sure we also throw on empty arrays.
+ Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayReduceRightLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ WireInCallbackIsCallableCheck(fncallback, context, check_frame_state, effect,
+ &control, &check_fail, &check_throw);
+
+ // Set initial accumulator value
+ Node* cur = nullptr;
+
+ Node* initial_element_check_fail = nullptr;
+ Node* initial_element_check_throw = nullptr;
+ if (node->op()->ValueInputCount() > 3) {
+ cur = NodeProperties::GetValueInput(node, 3);
+ } else {
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), original_length,
+ jsgraph()->SmiConstant(0));
+ Node* check_branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ initial_element_check_fail =
+ graph()->NewNode(common()->IfTrue(), check_branch);
+ initial_element_check_throw = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
+ jsgraph()->Constant(MessageTemplate::kReduceNoInitial), fncallback,
+ context, check_frame_state, effect, initial_element_check_fail);
+ control = graph()->NewNode(common()->IfFalse(), check_branch);
+
+ cur = SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+ k = graph()->NewNode(simplified()->NumberSubtract(), k,
+ jsgraph()->OneConstant());
+ }
+
+ // Start the loop.
+ Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ Node* kloop = k = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
+ Node* curloop = cur = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), cur, cur, loop);
+ checkpoint_params[2] = k;
+ checkpoint_params[4] = curloop;
+
+ control = loop;
+ effect = eloop;
+
+ Node* continue_test = graph()->NewNode(simplified()->NumberLessThanOrEqual(),
+ jsgraph()->ZeroConstant(), k);
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ continue_test, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ control = if_true;
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function,
+ Builtins::kArrayReduceRightLoopEagerDeoptContinuation, node->InputAt(0),
+ context, &checkpoint_params[0], stack_parameters, outer_frame_state,
+ ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+
+  // Make sure the map hasn't changed during the iteration.
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
+ effect, control);
+
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+
+ Node* next_k = graph()->NewNode(simplified()->NumberSubtract(), k,
+ jsgraph()->OneConstant());
+ checkpoint_params[2] = next_k;
+
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+
+ if (IsHoleyElementsKind(kind)) {
+    // Holey elements kinds require a hole check and skipping of the element in
+ // the case of a hole.
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+
+ // The contract is that we don't leak "the hole" into "user JavaScript",
+ // so we must rename the {element} here to explicitly exclude "the hole"
+ // from the type of {element}.
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
+ }
+
+ frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayReduceRightLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+
+ Node* next_cur = control = effect =
+ graph()->NewNode(javascript()->Call(6, p.frequency()), fncallback,
+ jsgraph()->UndefinedConstant(), cur, element, k,
+ receiver, context, frame_state, effect, control);
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
+ &check_fail, &control);
+ }
+
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_control = control;
+ Node* after_call_effect = effect;
+ control = hole_true;
+ effect = effect_true;
+
+ control = graph()->NewNode(common()->Merge(2), control, after_call_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
+ control);
+ next_cur =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), cur,
+ next_cur, control);
+ }
+
+ k = next_k;
+ cur = next_cur;
+
+ loop->ReplaceInput(1, control);
+ kloop->ReplaceInput(1, k);
+ curloop->ReplaceInput(1, cur);
+ eloop->ReplaceInput(1, effect);
+
+ control = if_false;
+ effect = eloop;
+
+ // Wire up the branch for the case when IsCallable fails for the callback.
+ // Since {check_throw} is an unconditional throw, it's impossible to
+ // return a successful completion. Therefore, we simply connect the successful
+ // completion to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ if (node->op()->ValueInputCount() <= 3) {
+    // Wire up the branch for the case when the array is empty.
+    // Since {initial_element_check_throw} is an unconditional throw, it's
+    // impossible to return a successful completion. Therefore, we simply
+    // connect the successful completion to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), initial_element_check_throw,
+ initial_element_check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+ }
+
+ ReplaceWithValue(node, curloop, effect, control);
+ return Replace(curloop);
+}
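A companion sketch (again not V8 code) for ReduceArrayReduceRight, showing only the direction that differs from the reduce case: k starts at length - 1, the loop test is 0 <= k, and k decreases. Names are illustrative only.

#include <functional>
#include <stdexcept>
#include <vector>

double ReduceRightLikeLoop(const std::vector<double>& arr,
                           const std::function<double(double, double, size_t)>& callback) {
  if (arr.empty()) {
    throw std::runtime_error("Reduce of empty array with no initial value");
  }
  size_t k = arr.size() - 1;     // NumberSubtract(original_length, 1) above
  double accumulator = arr[k];   // last element seeds the accumulator
  while (k > 0) {                // the NumberLessThanOrEqual(0, k) loop test
    --k;
    accumulator = callback(accumulator, arr[k], k);
  }
  return accumulator;
}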
Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
Node* node) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- CallParameters const& p = CallParametersOf(node->op());
// Try to determine the {receiver} map.
Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -1007,31 +1495,18 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- if (result != NodeProperties::kReliableReceiverMaps) {
- return NoChange();
- }
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// Ensure that any changes to the Array species constructor cause deopt.
if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
- if (receiver_maps.size() == 0) return NoChange();
-
const ElementsKind kind = receiver_maps[0]->elements_kind();
- // TODO(danno): Handle holey elements kinds.
- if (!IsFastPackedElementsKind(kind)) {
- return NoChange();
- }
-
for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
- return NoChange();
- }
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
// We can handle different maps, as long as their elements kind are the
// same.
- if (receiver_map->elements_kind() != kind) {
- return NoChange();
- }
+ if (receiver_map->elements_kind() != kind) return NoChange();
}
dependencies()->AssumePropertyCell(factory()->species_protector());
@@ -1045,10 +1520,13 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
Node* k = jsgraph()->ZeroConstant();
- // Make sure the map hasn't changed before we construct the output array.
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
- effect, control);
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
Node* original_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
@@ -1078,18 +1556,10 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
&control, &check_fail, &check_throw);
// Start the loop.
- Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
- Node* eloop = effect =
- graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
- Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
- NodeProperties::MergeControlToEnd(graph(), common(), terminate);
- Node* vloop = k = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
+ Node* vloop = k = WireInLoopStart(k, &control, &effect);
+ Node *loop = control, *eloop = effect;
checkpoint_params[4] = k;
- control = loop;
- effect = eloop;
-
Node* continue_test =
graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
@@ -1108,15 +1578,44 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
// Make sure the map hasn't changed during the iteration
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
- effect, control);
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
- Node* element = SafeLoadElement(kind, receiver, control, &effect, &k);
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
Node* next_k =
graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+
+ if (IsHoleyElementsKind(kind)) {
+    // Holey elements kinds require a hole check and skipping of the element in
+ // the case of a hole.
+ Node* check;
+ if (IsDoubleElementsKind(kind)) {
+ check = graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
+ } else {
+ check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ }
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+
+ // The contract is that we don't leak "the hole" into "user JavaScript",
+ // so we must rename the {element} here to explicitly exclude "the hole"
+ // from the type of {element}.
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
+ }
+
// This frame state is dealt with by hand in
// ArrayMapLoopLazyDeoptContinuation.
frame_state = CreateJavaScriptBuiltinContinuationFrameState(
@@ -1143,11 +1642,19 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
simplified()->TransitionAndStoreElement(double_map, fast_map), a, k,
callback_value, effect, control);
- k = next_k;
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_and_store_control = control;
+ Node* after_call_and_store_effect = effect;
+ control = hole_true;
+ effect = effect_true;
- loop->ReplaceInput(1, control);
- vloop->ReplaceInput(1, k);
- eloop->ReplaceInput(1, effect);
+ control = graph()->NewNode(common()->Merge(2), control,
+ after_call_and_store_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect,
+ after_call_and_store_effect, control);
+ }
+
+ WireInLoopEnd(loop, eloop, vloop, next_k, control, effect);
control = if_false;
effect = eloop;
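A brief illustrative sketch (not V8 code) of what the holey-elements path added to ReduceArrayMap corresponds to at the JavaScript level: when the source slot is a hole, neither the callback nor the store runs, so the output array keeps a hole at that index. The MapLikeLoop name and the optional-as-hole encoding are hypothetical.

#include <functional>
#include <optional>
#include <vector>

using Element = std::optional<double>;  // nullopt stands in for "the hole"

std::vector<Element> MapLikeLoop(const std::vector<Element>& arr,
                                 const std::function<double(double, size_t)>& callback) {
  std::vector<Element> result(arr.size());  // pre-sized like the inlined output array
  for (size_t k = 0; k < arr.size(); ++k) {
    if (!arr[k]) continue;                  // hole: skip both the call and the store
    result[k] = callback(*arr[k], k);       // TransitionAndStoreElement in the graph
  }
  return result;
}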
@@ -1168,11 +1675,15 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
Node* node) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- CallParameters const& p = CallParametersOf(node->op());
// Try to determine the {receiver} map.
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* fncallback = node->op()->ValueInputCount() > 2
@@ -1184,21 +1695,14 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- if (result != NodeProperties::kReliableReceiverMaps) {
- return NoChange();
- }
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// And ensure that any changes to the Array species constructor cause deopt.
if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
- if (receiver_maps.size() == 0) return NoChange();
-
const ElementsKind kind = receiver_maps[0]->elements_kind();
-
- // TODO(danno): Handle holey elements kinds.
- if (!IsFastPackedElementsKind(kind)) {
- return NoChange();
- }
+ // The output array is packed (filter doesn't visit holes).
+ const ElementsKind packed_kind = GetPackedElementsKind(kind);
for (Handle<Map> receiver_map : receiver_maps) {
if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
@@ -1206,23 +1710,24 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
}
// We can handle different maps, as long as their elements kind are the
// same.
- if (receiver_map->elements_kind() != kind) {
- return NoChange();
- }
+ if (receiver_map->elements_kind() != kind) return NoChange();
}
dependencies()->AssumePropertyCell(factory()->species_protector());
Handle<Map> initial_map(
- Map::cast(native_context()->GetInitialJSArrayMap(kind)));
+ Map::cast(native_context()->GetInitialJSArrayMap(packed_kind)));
Node* k = jsgraph()->ZeroConstant();
Node* to = jsgraph()->ZeroConstant();
- // Make sure the map hasn't changed before we construct the output array.
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
- effect, control);
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
Node* a; // Construct the output array.
{
@@ -1232,7 +1737,8 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
ab.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), empty_fixed_array);
ab.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
- ab.Store(AccessBuilder::ForJSArrayLength(kind), jsgraph()->ZeroConstant());
+ ab.Store(AccessBuilder::ForJSArrayLength(packed_kind),
+ jsgraph()->ZeroConstant());
for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
ab.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
jsgraph()->UndefinedConstant());
@@ -1268,19 +1774,11 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
}
// Start the loop.
- Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
- Node* eloop = effect =
- graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
- Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
- NodeProperties::MergeControlToEnd(graph(), common(), terminate);
- Node* vloop = k = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
+ Node* vloop = k = WireInLoopStart(k, &control, &effect);
+ Node *loop = control, *eloop = effect;
Node* v_to_loop = to = graph()->NewNode(
common()->Phi(MachineRepresentation::kTaggedSigned, 2), to, to, loop);
- control = loop;
- effect = eloop;
-
Node* continue_test =
graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
@@ -1305,15 +1803,45 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
}
// Make sure the map hasn't changed during the iteration.
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
- effect, control);
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
- Node* element = SafeLoadElement(kind, receiver, control, &effect, &k);
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
Node* next_k =
graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+ Node* hole_true_vto = to;
+
+ if (IsHoleyElementsKind(kind)) {
+    // Holey elements kinds require a hole check and skipping of the element in
+ // the case of a hole.
+ Node* check;
+ if (IsDoubleElementsKind(kind)) {
+ check = graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
+ } else {
+ check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ }
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+
+ // The contract is that we don't leak "the hole" into "user JavaScript",
+ // so we must rename the {element} here to explicitly exclude "the hole"
+ // from the type of {element}.
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
+ }
+
Node* callback_value = nullptr;
{
// This frame state is dealt with by hand in
@@ -1363,14 +1891,25 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
// We have to coerce callback_value to boolean, and only store the element in
// a if it's true. The checkpoint above protects against the case that
// growing {a} fails.
- to = DoFilterPostCallbackWork(kind, &control, &effect, a, to, element,
+ to = DoFilterPostCallbackWork(packed_kind, &control, &effect, a, to, element,
callback_value);
- k = next_k;
- loop->ReplaceInput(1, control);
- vloop->ReplaceInput(1, k);
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_control = control;
+ Node* after_call_effect = effect;
+ control = hole_true;
+ effect = effect_true;
+
+ control = graph()->NewNode(common()->Merge(2), control, after_call_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
+ control);
+ to =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTaggedSigned, 2),
+ hole_true_vto, to, control);
+ }
+
+ WireInLoopEnd(loop, eloop, vloop, next_k, control, effect);
v_to_loop->ReplaceInput(1, to);
- eloop->ReplaceInput(1, effect);
control = if_false;
effect = eloop;
@@ -1387,6 +1926,216 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
return Replace(a);
}
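Before the next function, a minimal sketch (not V8 code) of the filter loop shape ReduceArrayFilter builds: holes are skipped, the callback result is coerced to boolean, and matching elements are appended densely, which is why the output array uses the packed elements kind even for holey inputs. FilterLikeLoop and the optional-as-hole encoding are illustrative only.

#include <functional>
#include <optional>
#include <vector>

using Element = std::optional<double>;  // nullopt stands in for "the hole"

std::vector<double> FilterLikeLoop(const std::vector<Element>& arr,
                                   const std::function<bool(double, size_t)>& callback) {
  std::vector<double> result;  // "to" is the next dense write index; push_back plays that role
  for (size_t k = 0; k < arr.size(); ++k) {
    if (!arr[k]) continue;                 // holey kinds: holes never reach the callback
    if (callback(*arr[k], k)) {
      result.push_back(*arr[k]);           // corresponds to DoFilterPostCallbackWork
    }
  }
  return result;
}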
+Reduction JSCallReducer::ReduceArrayFind(ArrayFindVariant variant,
+ Handle<JSFunction> function,
+ Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Builtins::Name eager_continuation_builtin;
+ Builtins::Name lazy_continuation_builtin;
+ Builtins::Name after_callback_lazy_continuation_builtin;
+ if (variant == ArrayFindVariant::kFind) {
+ eager_continuation_builtin = Builtins::kArrayFindLoopEagerDeoptContinuation;
+ lazy_continuation_builtin = Builtins::kArrayFindLoopLazyDeoptContinuation;
+ after_callback_lazy_continuation_builtin =
+ Builtins::kArrayFindLoopAfterCallbackLazyDeoptContinuation;
+ } else {
+ DCHECK_EQ(ArrayFindVariant::kFindIndex, variant);
+ eager_continuation_builtin =
+ Builtins::kArrayFindIndexLoopEagerDeoptContinuation;
+ lazy_continuation_builtin =
+ Builtins::kArrayFindIndexLoopLazyDeoptContinuation;
+ after_callback_lazy_continuation_builtin =
+ Builtins::kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation;
+ }
+
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* this_arg = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ const ElementsKind kind = receiver_maps[0]->elements_kind();
+
+ // TODO(pwong): Handle holey double elements kinds.
+ if (IsDoubleElementsKind(kind) && IsHoleyElementsKind(kind)) {
+ return NoChange();
+ }
+
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
+ // We can handle different maps, as long as their elements kind are the
+ // same.
+ if (receiver_map->elements_kind() != kind) return NoChange();
+ }
+
+ // Install code dependencies on the {receiver} prototype maps and the
+ // global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ Node* k = jsgraph()->ZeroConstant();
+
+ Node* original_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ // Check whether the given callback function is callable. Note that this has
+ // to happen outside the loop to make sure we also throw on empty arrays.
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ {
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, lazy_continuation_builtin, node->InputAt(0),
+ context, &checkpoint_params[0], stack_parameters, outer_frame_state,
+ ContinuationFrameStateMode::LAZY);
+ WireInCallbackIsCallableCheck(fncallback, context, frame_state, effect,
+ &control, &check_fail, &check_throw);
+ }
+
+ // Start the loop.
+ Node* vloop = k = WireInLoopStart(k, &control, &effect);
+ Node *loop = control, *eloop = effect;
+ checkpoint_params[3] = k;
+
+ // Check if we've iterated past the last element of the array.
+ Node* if_false = nullptr;
+ {
+ Node* continue_test =
+ graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
+ Node* continue_branch = graph()->NewNode(
+ common()->Branch(BranchHint::kTrue), continue_test, control);
+ control = graph()->NewNode(common()->IfTrue(), continue_branch);
+ if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ }
+
+ // Check the map hasn't changed during the iteration.
+ {
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, eager_continuation_builtin, node->InputAt(0),
+ context, &checkpoint_params[0], stack_parameters, outer_frame_state,
+ ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ // Load k-th element from receiver.
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+
+ // Increment k for the next iteration.
+ Node* next_k = checkpoint_params[3] =
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+
+ // Replace holes with undefined.
+ if (IsHoleyElementsKind(kind)) {
+ element = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant()),
+ jsgraph()->UndefinedConstant(), element);
+ }
+
+ Node* if_found_return_value =
+ (variant == ArrayFindVariant::kFind) ? element : k;
+
+ // Call the callback.
+ Node* callback_value = nullptr;
+ {
+ std::vector<Node*> call_checkpoint_params({receiver, fncallback, this_arg,
+ next_k, original_length,
+ if_found_return_value});
+ const int call_stack_parameters =
+ static_cast<int>(call_checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, after_callback_lazy_continuation_builtin,
+ node->InputAt(0), context, &call_checkpoint_params[0],
+ call_stack_parameters, outer_frame_state,
+ ContinuationFrameStateMode::LAZY);
+
+ callback_value = control = effect = graph()->NewNode(
+ javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
+ receiver, context, frame_state, effect, control);
+ }
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
+ &check_fail, &control);
+ }
+
+ // Check whether the given callback function returned a truthy value.
+ Node* boolean_result =
+ graph()->NewNode(simplified()->ToBoolean(), callback_value);
+ Node* efound_branch = effect;
+ Node* found_branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ boolean_result, control);
+ Node* if_found = graph()->NewNode(common()->IfTrue(), found_branch);
+ Node* if_notfound = graph()->NewNode(common()->IfFalse(), found_branch);
+ control = if_notfound;
+
+ // Close the loop.
+ WireInLoopEnd(loop, eloop, vloop, next_k, control, effect);
+
+ control = graph()->NewNode(common()->Merge(2), if_found, if_false);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), efound_branch, eloop, control);
+
+ Node* if_not_found_value = (variant == ArrayFindVariant::kFind)
+ ? jsgraph()->UndefinedConstant()
+ : jsgraph()->MinusOneConstant();
+ Node* return_value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ if_found_return_value, if_not_found_value, control);
+
+ // Wire up the branch for the case when IsCallable fails for the callback.
+ // Since {check_throw} is an unconditional throw, it's impossible to
+ // return a successful completion. Therefore, we simply connect the successful
+ // completion to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ ReplaceWithValue(node, return_value, effect, control);
+ return Replace(return_value);
+}
+
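A short sketch (not V8 code) of the variant handling in ReduceArrayFind above: a hole is not skipped but handed to the callback as undefined, kFind returns the element on a truthy result while kFindIndex returns the index, and the "not found" results are undefined and -1 respectively. FindIndexLikeLoop and the optional encoding are hypothetical; nullopt doubles as both the hole and undefined here.

#include <functional>
#include <optional>
#include <vector>

using Element = std::optional<double>;

long FindIndexLikeLoop(const std::vector<Element>& arr,
                       const std::function<bool(Element, size_t)>& callback) {
  for (size_t k = 0; k < arr.size(); ++k) {
    // Unlike forEach/map/filter, the hole still reaches the callback, but as
    // undefined (the Select node above performs that replacement).
    Element element = arr[k];
    if (callback(element, k)) {
      return static_cast<long>(k);  // kFindIndex returns the index on a match
    }
  }
  return -1;  // kFindIndex "not found"; kFind would return undefined instead
}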
Node* JSCallReducer::DoFilterPostCallbackWork(ElementsKind kind, Node** control,
Node** effect, Node* a, Node* to,
Node* element,
@@ -1411,8 +2160,8 @@ Node* JSCallReducer::DoFilterPostCallbackWork(ElementsKind kind, Node** control,
// We know that {to} is in Unsigned31 range here, being smaller than
// {original_length} at all times.
- Node* checked_to =
- graph()->NewNode(common()->TypeGuard(Type::Unsigned31()), to, if_true);
+ Node* checked_to = etrue = graph()->NewNode(
+ common()->TypeGuard(Type::Unsigned31()), to, etrue, if_true);
Node* elements_length = etrue = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements,
etrue, if_true);
@@ -1420,9 +2169,9 @@ Node* JSCallReducer::DoFilterPostCallbackWork(ElementsKind kind, Node** control,
GrowFastElementsMode mode =
IsDoubleElementsKind(kind) ? GrowFastElementsMode::kDoubleElements
: GrowFastElementsMode::kSmiOrObjectElements;
- elements = etrue =
- graph()->NewNode(simplified()->MaybeGrowFastElements(mode), a, elements,
- checked_to, elements_length, etrue, if_true);
+ elements = etrue = graph()->NewNode(
+ simplified()->MaybeGrowFastElements(mode, VectorSlotPair()), a,
+ elements, checked_to, elements_length, etrue, if_true);
// Update the length of {a}.
Node* new_length_a = graph()->NewNode(simplified()->NumberAdd(), checked_to,
@@ -1489,14 +2238,15 @@ void JSCallReducer::RewirePostCallbackExceptionEdges(Node* check_throw,
}
Node* JSCallReducer::SafeLoadElement(ElementsKind kind, Node* receiver,
- Node* control, Node** effect, Node** k) {
+ Node* control, Node** effect, Node** k,
+ const VectorSlotPair& feedback) {
// Make sure that the access is still in bounds, since the callback could have
// changed the array's size.
Node* length = *effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
*effect, control);
- *k = *effect = graph()->NewNode(simplified()->CheckBounds(), *k, length,
- *effect, control);
+ *k = *effect = graph()->NewNode(simplified()->CheckBounds(feedback), *k,
+ length, *effect, control);
// Reload the elements pointer before calling the callback, since the previous
// callback might have resized the array causing the elements buffer to be
@@ -1514,6 +2264,455 @@ Node* JSCallReducer::SafeLoadElement(ElementsKind kind, Node* receiver,
return element;
}
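To illustrate why SafeLoadElement re-checks bounds and reloads the elements backing store on every iteration, a sketch (not V8 code, names hypothetical) of the hazard it guards against: the user callback can shrink or grow the array, invalidating both the previously observed length and the previously loaded elements pointer.

#include <functional>
#include <vector>

double SumWithResizingCallback(std::vector<double>& arr,
                               const std::function<void(std::vector<double>&)>& mutate) {
  double sum = 0;
  const size_t original_length = arr.size();
  for (size_t k = 0; k < original_length; ++k) {
    if (k >= arr.size()) break;  // the fresh length load plus CheckBounds in the graph
    sum += arr[k];               // the element must be read through the current storage
    mutate(arr);                 // may resize or reallocate, just as a JS callback can
  }
  return sum;
}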
+Reduction JSCallReducer::ReduceArrayEvery(Handle<JSFunction> function,
+ Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* this_arg = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ // And ensure that any changes to the Array species constructor cause deopt.
+ if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+
+ const ElementsKind kind = receiver_maps[0]->elements_kind();
+
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
+ // We can handle different maps, as long as their elements kind are the
+ // same.
+ if (receiver_map->elements_kind() != kind) return NoChange();
+ }
+
+ dependencies()->AssumePropertyCell(factory()->species_protector());
+
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ Node* k = jsgraph()->ZeroConstant();
+
+ // Make sure the map hasn't changed before we construct the output array.
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
+ effect, control);
+
+ Node* original_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+
+ // Check whether the given callback function is callable. Note that this has
+ // to happen outside the loop to make sure we also throw on empty arrays.
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ {
+ // This frame state doesn't ever call the deopt continuation, it's only
+    // necessary to specify a continuation in order to handle the exceptional
+ // case.
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayEveryLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+ WireInCallbackIsCallableCheck(fncallback, context, check_frame_state,
+ effect, &control, &check_fail, &check_throw);
+ }
+
+ // Start the loop.
+ Node* vloop = k = WireInLoopStart(k, &control, &effect);
+ Node *loop = control, *eloop = effect;
+
+ Node* continue_test =
+ graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ continue_test, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ control = if_true;
+
+ {
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayEveryLoopEagerDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+ }
+
+ // Make sure the map hasn't changed during the iteration.
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+
+ Node* next_k =
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+
+ if (IsHoleyElementsKind(kind)) {
+    // Holey elements kinds require a hole check and skipping of the element in
+ // the case of a hole.
+ Node* check;
+ if (IsDoubleElementsKind(kind)) {
+ check = graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
+ } else {
+ check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ }
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+
+ // The contract is that we don't leak "the hole" into "user JavaScript",
+ // so we must rename the {element} here to explicitly exclude "the hole"
+ // from the type of {element}.
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
+ }
+
+ Node* callback_value = nullptr;
+ {
+ // This frame state is dealt with by hand in
+ // Builtins::kArrayEveryLoopLazyDeoptContinuation.
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayEveryLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+
+ callback_value = control = effect = graph()->NewNode(
+ javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
+ receiver, context, frame_state, effect, control);
+ }
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
+ &check_fail, &control);
+ }
+
+ // We have to coerce callback_value to boolean.
+ Node* if_false_callback;
+ Node* efalse_callback;
+ {
+ Node* boolean_result =
+ graph()->NewNode(simplified()->ToBoolean(), callback_value);
+ Node* check_boolean_result =
+ graph()->NewNode(simplified()->ReferenceEqual(), boolean_result,
+ jsgraph()->TrueConstant());
+ Node* boolean_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check_boolean_result, control);
+ if_false_callback = graph()->NewNode(common()->IfFalse(), boolean_branch);
+ efalse_callback = effect;
+
+ // Nothing to do in the true case.
+ control = graph()->NewNode(common()->IfTrue(), boolean_branch);
+ }
+
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_control = control;
+ Node* after_call_effect = effect;
+ control = hole_true;
+ effect = effect_true;
+
+ control = graph()->NewNode(common()->Merge(2), control, after_call_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
+ control);
+ }
+
+ WireInLoopEnd(loop, eloop, vloop, next_k, control, effect);
+
+ control = graph()->NewNode(common()->Merge(2), if_false, if_false_callback);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), eloop, efalse_callback, control);
+ Node* return_value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->TrueConstant(), jsgraph()->FalseConstant(), control);
+
+ // Wire up the branch for the case when IsCallable fails for the callback.
+ // Since {check_throw} is an unconditional throw, it's impossible to
+ // return a successful completion. Therefore, we simply connect the successful
+ // completion to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ ReplaceWithValue(node, return_value, effect, control);
+ return Replace(return_value);
+}
+
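A final sketch (not V8 code) of the extra branch ReduceArrayEvery adds after the callback: the result is coerced to boolean and a falsy value leaves the loop immediately with false, while completing the loop yields true. ReduceArraySome below is the mirror image (a truthy result exits with true, completion yields false). EveryLikeLoop and the optional encoding are illustrative only.

#include <functional>
#include <optional>
#include <vector>

using Element = std::optional<double>;  // nullopt stands in for "the hole"

bool EveryLikeLoop(const std::vector<Element>& arr,
                   const std::function<bool(double, size_t)>& callback) {
  for (size_t k = 0; k < arr.size(); ++k) {
    if (!arr[k]) continue;                     // holey kinds: holes are skipped
    if (!callback(*arr[k], k)) return false;   // the if_false_callback exit above
  }
  return true;
}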
+Reduction JSCallReducer::ReduceArraySome(Handle<JSFunction> function,
+ Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* this_arg = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ // And ensure that any changes to the Array species constructor cause deopt.
+ if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+
+ if (receiver_maps.size() == 0) return NoChange();
+
+ const ElementsKind kind = receiver_maps[0]->elements_kind();
+
+ // TODO(pwong): Handle holey double elements kinds.
+ if (IsDoubleElementsKind(kind) && IsHoleyElementsKind(kind)) {
+ return NoChange();
+ }
+
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
+ // We can handle different maps, as long as their elements kinds are the
+ // same.
+ if (receiver_map->elements_kind() != kind) return NoChange();
+ }
+
+ dependencies()->AssumePropertyCell(factory()->species_protector());
+
+ Node* k = jsgraph()->ZeroConstant();
+
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ // Make sure the map hasn't changed before we load the length below.
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
+ effect, control);
+
+ Node* original_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+
+ // Check whether the given callback function is callable. Note that this has
+ // to happen outside the loop to make sure we also throw on empty arrays.
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ {
+ // This frame state doesn't ever call the deopt continuation; it's only
+ // necessary to specify a continuation in order to handle the exceptional
+ // case.
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArraySomeLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+ WireInCallbackIsCallableCheck(fncallback, context, check_frame_state,
+ effect, &control, &check_fail, &check_throw);
+ }
+
+ // Start the loop.
+ Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ Node* vloop = k = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
+
+ Node* continue_test =
+ graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ continue_test, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ control = if_true;
+
+ {
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArraySomeLoopEagerDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+ }
+
+ // Make sure the map hasn't changed during the iteration.
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+
+ Node* next_k =
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+
+ if (IsHoleyElementsKind(kind)) {
+ // Holey elements kinds require a hole check and skipping of the element in
+ // the case of a hole.
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+
+ // The contract is that we don't leak "the hole" into "user JavaScript",
+ // so we must rename the {element} here to explicitly exclude "the hole"
+ // from the type of {element}.
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
+ }
+
+ Node* callback_value = nullptr;
+ {
+ // This frame state is dealt with by hand in
+ // Builtins::kArraySomeLoopLazyDeoptContinuation.
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArraySomeLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+
+ callback_value = control = effect = graph()->NewNode(
+ javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
+ receiver, context, frame_state, effect, control);
+ }
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
+ &check_fail, &control);
+ }
+
+ // We have to coerce callback_value to boolean.
+ Node* if_true_callback;
+ Node* etrue_callback;
+ {
+ Node* boolean_result =
+ graph()->NewNode(simplified()->ToBoolean(), callback_value);
+ Node* check_boolean_result =
+ graph()->NewNode(simplified()->ReferenceEqual(), boolean_result,
+ jsgraph()->TrueConstant());
+ Node* boolean_branch = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse), check_boolean_result, control);
+ if_true_callback = graph()->NewNode(common()->IfTrue(), boolean_branch);
+ etrue_callback = effect;
+
+ // Nothing to do in the false case.
+ control = graph()->NewNode(common()->IfFalse(), boolean_branch);
+ }
+
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_control = control;
+ Node* after_call_effect = effect;
+ control = hole_true;
+ effect = effect_true;
+
+ control = graph()->NewNode(common()->Merge(2), control, after_call_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
+ control);
+ }
+
+ loop->ReplaceInput(1, control);
+ vloop->ReplaceInput(1, next_k);
+ eloop->ReplaceInput(1, effect);
+
+ control = graph()->NewNode(common()->Merge(2), if_false, if_true_callback);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), eloop, etrue_callback, control);
+ Node* return_value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->FalseConstant(), jsgraph()->TrueConstant(), control);
+
+ // Wire up the branch for the case when IsCallable fails for the callback.
+ // Since {check_throw} is an unconditional throw, no successful completion is
+ // possible along that path, so we simply connect it to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ ReplaceWithValue(node, return_value, effect, control);
+ return Replace(return_value);
+}
+
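For orientation, the graph built above computes the observable behaviour sketched below: holes are skipped for holey elements kinds, the callback result is coerced to boolean, and the loop exits with true on the first truthy result (ReduceArrayEvery is the mirror image, exiting with false on the first falsy result). The following is a hedged, standalone C++ model of those semantics, not the reducer itself; names such as SomeModel are illustrative only, and elements are simplified to optional ints with nullopt standing in for the hole.

    #include <functional>
    #include <optional>
    #include <vector>

    // Behavioural model of the inlined Array.prototype.some fast path.
    bool SomeModel(const std::vector<std::optional<int>>& elements,
                   const std::function<bool(int, size_t)>& callback) {
      for (size_t k = 0; k < elements.size(); ++k) {
        if (!elements[k].has_value()) continue;      // hole check: skip the element
        if (callback(*elements[k], k)) return true;  // ToBoolean(callback_value) is true
      }
      return false;  // loop exhausted: the FalseConstant input of the result Phi
    }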
Reduction JSCallReducer::ReduceCallApiFunction(Node* node,
Handle<JSFunction> function) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -1911,8 +3110,6 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceFunctionPrototypeCall(node);
case Builtins::kFunctionPrototypeHasInstance:
return ReduceFunctionPrototypeHasInstance(node);
- case Builtins::kNumberConstructor:
- return ReduceNumberConstructor(node);
case Builtins::kObjectConstructor:
return ReduceObjectConstructor(node);
case Builtins::kObjectGetPrototypeOf:
@@ -1941,8 +3138,30 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceArrayMap(function, node);
case Builtins::kArrayFilter:
return ReduceArrayFilter(function, node);
+ case Builtins::kArrayReduce:
+ return ReduceArrayReduce(function, node);
+ case Builtins::kArrayReduceRight:
+ return ReduceArrayReduceRight(function, node);
+ case Builtins::kArrayPrototypeFind:
+ return ReduceArrayFind(ArrayFindVariant::kFind, function, node);
+ case Builtins::kArrayPrototypeFindIndex:
+ return ReduceArrayFind(ArrayFindVariant::kFindIndex, function, node);
+ case Builtins::kArrayEvery:
+ return ReduceArrayEvery(function, node);
+ case Builtins::kArrayPrototypePush:
+ return ReduceArrayPrototypePush(node);
+ case Builtins::kArrayPrototypePop:
+ return ReduceArrayPrototypePop(node);
+ case Builtins::kArrayPrototypeShift:
+ return ReduceArrayPrototypeShift(node);
case Builtins::kReturnReceiver:
return ReduceReturnReceiver(node);
+ case Builtins::kStringPrototypeIndexOf:
+ return ReduceStringPrototypeIndexOf(function, node);
+ case Builtins::kStringPrototypeCharAt:
+ return ReduceStringPrototypeCharAt(node);
+ case Builtins::kStringPrototypeCharCodeAt:
+ return ReduceStringPrototypeCharCodeAt(node);
default:
break;
}
@@ -2046,9 +3265,9 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Check that the {target} is still the {target_function}.
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
target_function);
- effect =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
- check, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kWrongCallTarget), check,
+ effect, control);
// Specialize the JSCall node to the {target_function}.
NodeProperties::ReplaceValueInput(node, target_function, 0);
@@ -2119,9 +3338,9 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// Check that the {target} is still the {array_function}.
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
array_function);
- effect =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
- check, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kWrongCallTarget), check,
+ effect, control);
// Turn the {node} into a {JSCreateArray} call.
NodeProperties::ReplaceEffectInput(node, effect);
@@ -2142,9 +3361,9 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// Check that the {new_target} is still the {new_target_feedback}.
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
new_target, new_target_feedback);
- effect =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
- check, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kWrongCallTarget), check,
+ effect, control);
// Specialize the JSConstruct node to the {new_target_feedback}.
NodeProperties::ReplaceValueInput(node, new_target_feedback, arity + 1);
@@ -2297,6 +3516,47 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
return NoChange();
}
+// ES6 String.prototype.indexOf(searchString [, position])
+// #sec-string.prototype.indexof
+Reduction JSCallReducer::ReduceStringPrototypeIndexOf(
+ Handle<JSFunction> function, Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (node->op()->ValueInputCount() >= 3) {
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* new_receiver = effect = graph()->NewNode(
+ simplified()->CheckString(p.feedback()), receiver, effect, control);
+
+ Node* search_string = NodeProperties::GetValueInput(node, 2);
+ Node* new_search_string = effect =
+ graph()->NewNode(simplified()->CheckString(p.feedback()), search_string,
+ effect, control);
+
+ Node* new_position = jsgraph()->ZeroConstant();
+ if (node->op()->ValueInputCount() >= 4) {
+ Node* position = NodeProperties::GetValueInput(node, 3);
+ new_position = effect = graph()->NewNode(
+ simplified()->CheckSmi(p.feedback()), position, effect, control);
+ }
+
+ NodeProperties::ReplaceEffectInput(node, effect);
+ RelaxEffectsAndControls(node);
+ node->ReplaceInput(0, new_receiver);
+ node->ReplaceInput(1, new_search_string);
+ node->ReplaceInput(2, new_position);
+ node->TrimInputCount(3);
+ NodeProperties::ChangeOp(node, simplified()->StringIndexOf());
+ return Changed(node);
+ }
+ return NoChange();
+}
+
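The reduction above only fires when a search string is explicitly passed (ValueInputCount >= 3); the position argument defaults to zero and must pass a Smi check, after which the JSCall is rewritten into the simplified StringIndexOf operator with exactly three inputs. A hedged, standalone C++ model of the resulting semantics (IndexOfModel is an illustrative name; one-byte strings are assumed):

    #include <algorithm>
    #include <string>

    // Behavioural model of StringIndexOf(receiver, search, position).
    int IndexOfModel(const std::string& receiver, const std::string& search,
                     int position = 0) {
      // The position is clamped to [0, length], as for String.prototype.indexOf.
      size_t start = static_cast<size_t>(
          std::clamp(position, 0, static_cast<int>(receiver.size())));
      size_t found = receiver.find(search, start);
      return found == std::string::npos ? -1 : static_cast<int>(found);
    }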
Reduction JSCallReducer::ReduceJSConstructWithArrayLike(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstructWithArrayLike, node->opcode());
CallFrequency frequency = CallFrequencyOf(node->op());
@@ -2328,9 +3588,9 @@ Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node,
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* frame_state = NodeProperties::FindFrameStateBefore(node);
- Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kSoft, reason),
- frame_state, effect, control);
+ Node* deoptimize = graph()->NewNode(
+ common()->Deoptimize(DeoptimizeKind::kSoft, reason, VectorSlotPair()),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
Revisit(graph()->end());
@@ -2339,6 +3599,571 @@ Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node,
return Changed(node);
}
+namespace {
+
+// TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
+bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map) {
+ DCHECK(!jsarray_map->is_dictionary_map());
+ Isolate* isolate = jsarray_map->GetIsolate();
+ Handle<Name> length_string = isolate->factory()->length_string();
+ DescriptorArray* descriptors = jsarray_map->instance_descriptors();
+ int number =
+ descriptors->SearchWithCache(isolate, *length_string, *jsarray_map);
+ DCHECK_NE(DescriptorArray::kNotFound, number);
+ return descriptors->GetDetails(number).IsReadOnly();
+}
+
+// TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
+bool CanInlineArrayResizeOperation(Handle<Map> receiver_map) {
+ Isolate* const isolate = receiver_map->GetIsolate();
+ if (!receiver_map->prototype()->IsJSArray()) return false;
+ Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
+ isolate);
+ return receiver_map->instance_type() == JS_ARRAY_TYPE &&
+ IsFastElementsKind(receiver_map->elements_kind()) &&
+ !receiver_map->is_dictionary_map() && receiver_map->is_extensible() &&
+ isolate->IsAnyInitialArrayPrototype(receiver_prototype) &&
+ !IsReadOnlyLengthDescriptor(receiver_map);
+}
+
+} // namespace
+
+// ES6 section 22.1.3.18 Array.prototype.push ( )
+Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ if (!isolate()->IsNoElementsProtectorIntact()) return NoChange();
+
+ int const num_values = node->op()->ValueInputCount() - 2;
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Try to determine the {receiver} map(s).
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, receiver_maps.size());
+
+ ElementsKind kind = receiver_maps[0]->elements_kind();
+
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayResizeOperation(receiver_map)) return NoChange();
+ if (!UnionElementsKindUptoPackedness(&kind, receiver_map->elements_kind()))
+ return NoChange();
+ }
+
+ // Install code dependencies on the {receiver} global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+
+ // If the {receiver_maps} information is not reliable, we need
+ // to check that the {receiver} still has one of these maps.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ // Collect the value inputs to push.
+ std::vector<Node*> values(num_values);
+ for (int i = 0; i < num_values; ++i) {
+ values[i] = NodeProperties::GetValueInput(node, 2 + i);
+ }
+
+ for (auto& value : values) {
+ if (IsSmiElementsKind(kind)) {
+ value = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
+ value, effect, control);
+ } else if (IsDoubleElementsKind(kind)) {
+ value = effect = graph()->NewNode(simplified()->CheckNumber(p.feedback()),
+ value, effect, control);
+ // Make sure we do not store signaling NaNs into double arrays.
+ value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
+ }
+ }
+
+ // Load the "length" property of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+ Node* value = length;
+
+ // Check if we have any {values} to push.
+ if (num_values > 0) {
+ // Compute the resulting "length" of the {receiver}.
+ Node* new_length = value = graph()->NewNode(
+ simplified()->NumberAdd(), length, jsgraph()->Constant(num_values));
+
+ // Load the elements backing store of the {receiver}.
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+ effect, control);
+ Node* elements_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements,
+ effect, control);
+
+ GrowFastElementsMode mode =
+ IsDoubleElementsKind(kind) ? GrowFastElementsMode::kDoubleElements
+ : GrowFastElementsMode::kSmiOrObjectElements;
+ elements = effect = graph()->NewNode(
+ simplified()->MaybeGrowFastElements(mode, p.feedback()), receiver,
+ elements,
+ graph()->NewNode(simplified()->NumberAdd(), length,
+ jsgraph()->Constant(num_values - 1)),
+ elements_length, effect, control);
+
+ // Update the JSArray::length field. Since this is observable,
+ // there must be no other check after this.
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, new_length, effect, control);
+
+ // Append the {values} to the {elements}.
+ for (int i = 0; i < num_values; ++i) {
+ Node* value = values[i];
+ Node* index = graph()->NewNode(simplified()->NumberAdd(), length,
+ jsgraph()->Constant(i));
+ effect = graph()->NewNode(
+ simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(kind)),
+ elements, index, value, effect, control);
+ }
+ }
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
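The fast path above type-checks each pushed value against the elements kind, grows the backing store if necessary, writes the values, bumps the length, and returns it; with no arguments the old length is returned unchanged. A minimal behavioural sketch in standalone C++ (PushModel is an illustrative name; packed Smi elements and already-passed protector/map checks are assumed):

    #include <vector>

    // Behavioural model of the inlined Array.prototype.push fast path.
    int PushModel(std::vector<int>& elements, const std::vector<int>& values) {
      for (int v : values) elements.push_back(v);  // MaybeGrowFastElements + StoreElement
      return static_cast<int>(elements.size());    // new length, also written via StoreField
    }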
+// ES6 section 22.1.3.17 Array.prototype.pop ( )
+Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ if (!isolate()->IsNoElementsProtectorIntact()) return NoChange();
+
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, receiver_maps.size());
+
+ ElementsKind kind = receiver_maps[0]->elements_kind();
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayResizeOperation(receiver_map)) return NoChange();
+ // TODO(turbofan): Extend this to also handle fast holey double elements
+ // once we've got the hole NaN mess sorted out in TurboFan/V8.
+ if (receiver_map->elements_kind() == HOLEY_DOUBLE_ELEMENTS)
+ return NoChange();
+ if (!UnionElementsKindUptoPackedness(&kind, receiver_map->elements_kind()))
+ return NoChange();
+ }
+
+ // Install code dependencies on the {receiver} global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+
+ // If the {receiver_maps} information is not reliable, we need
+ // to check that the {receiver} still has one of these maps.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ // Load the "length" property of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+
+ // Check if the {receiver} has any elements.
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), length,
+ jsgraph()->ZeroConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->UndefinedConstant();
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ // TODO(tebbi): We should trim the backing store if the capacity is too
+ // big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
+
+ // Load the elements backing store from the {receiver}.
+ Node* elements = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+ efalse, if_false);
+
+ // Ensure that we aren't popping from a copy-on-write backing store.
+ if (IsSmiOrObjectElementsKind(kind)) {
+ elements = efalse =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(), receiver,
+ elements, efalse, if_false);
+ }
+
+ // Compute the new {length}.
+ length = graph()->NewNode(simplified()->NumberSubtract(), length,
+ jsgraph()->OneConstant());
+
+ // Store the new {length} to the {receiver}.
+ efalse = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, length, efalse, if_false);
+
+ // Load the last entry from the {elements}.
+ vfalse = efalse = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
+ elements, length, efalse, if_false);
+
+ // Store a hole to the element we just removed from the {receiver}.
+ efalse = graph()->NewNode(
+ simplified()->StoreElement(
+ AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))),
+ elements, length, jsgraph()->TheHoleConstant(), efalse, if_false);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ Node* value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+
+ // Convert the hole to undefined. Do this last, so that the conversion can
+ // often be optimized away via strength reduction.
+ if (IsHoleyElementsKind(kind)) {
+ value =
+ graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
+ }
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+// ES6 section 22.1.3.22 Array.prototype.shift ( )
+Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ if (!isolate()->IsNoElementsProtectorIntact()) return NoChange();
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, receiver_maps.size());
+
+ ElementsKind kind = receiver_maps[0]->elements_kind();
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayResizeOperation(receiver_map)) return NoChange();
+ // TODO(turbofan): Extend this to also handle fast holey double elements
+ // once we've got the hole NaN mess sorted out in TurboFan/V8.
+ if (receiver_map->elements_kind() == HOLEY_DOUBLE_ELEMENTS)
+ return NoChange();
+ if (!UnionElementsKindUptoPackedness(&kind, receiver_map->elements_kind()))
+ return NoChange();
+ }
+
+ // Install code dependencies on the {receiver} global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+
+ // If the {receiver_maps} information is not reliable, we need
+ // to check that the {receiver} still has one of these maps.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ // Load length of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+
+ // Return undefined if {receiver} has no elements.
+ Node* check0 = graph()->NewNode(simplified()->NumberEqual(), length,
+ jsgraph()->ZeroConstant());
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0 = jsgraph()->UndefinedConstant();
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0;
+ {
+ // Check if we should take the fast-path.
+ Node* check1 =
+ graph()->NewNode(simplified()->NumberLessThanOrEqual(), length,
+ jsgraph()->Constant(JSArray::kMaxCopyElements));
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = efalse0;
+ Node* vtrue1;
+ {
+ Node* elements = etrue1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, etrue1, if_true1);
+
+ // Load the first element here, which we return below.
+ vtrue1 = etrue1 = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
+ elements, jsgraph()->ZeroConstant(), etrue1, if_true1);
+
+ // Ensure that we aren't shifting a copy-on-write backing store.
+ if (IsSmiOrObjectElementsKind(kind)) {
+ elements = etrue1 =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(),
+ receiver, elements, etrue1, if_true1);
+ }
+
+ // Shift the remaining {elements} by one towards the start.
+ Node* loop = graph()->NewNode(common()->Loop(2), if_true1, if_true1);
+ Node* eloop =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ Node* index = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->OneConstant(),
+ jsgraph()->Constant(JSArray::kMaxCopyElements - 1), loop);
+
+ {
+ Node* check2 =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch2 = graph()->NewNode(common()->Branch(), check2, loop);
+
+ if_true1 = graph()->NewNode(common()->IfFalse(), branch2);
+ etrue1 = eloop;
+
+ Node* control = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* effect = etrue1;
+
+ ElementAccess const access = AccessBuilder::ForFixedArrayElement(kind);
+ Node* value = effect =
+ graph()->NewNode(simplified()->LoadElement(access), elements, index,
+ effect, control);
+ effect =
+ graph()->NewNode(simplified()->StoreElement(access), elements,
+ graph()->NewNode(simplified()->NumberSubtract(),
+ index, jsgraph()->OneConstant()),
+ value, effect, control);
+
+ loop->ReplaceInput(1, control);
+ eloop->ReplaceInput(1, effect);
+ index->ReplaceInput(1,
+ graph()->NewNode(simplified()->NumberAdd(), index,
+ jsgraph()->OneConstant()));
+ }
+
+ // Compute the new {length}.
+ length = graph()->NewNode(simplified()->NumberSubtract(), length,
+ jsgraph()->OneConstant());
+
+ // Store the new {length} to the {receiver}.
+ etrue1 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, length, etrue1, if_true1);
+
+ // Store a hole to the element we just removed from the {receiver}.
+ etrue1 = graph()->NewNode(
+ simplified()->StoreElement(
+ AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))),
+ elements, length, jsgraph()->TheHoleConstant(), etrue1, if_true1);
+ }
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = efalse0;
+ Node* vfalse1;
+ {
+ // Call the generic C++ implementation.
+ const int builtin_index = Builtins::kArrayShift;
+ CallDescriptor const* const desc = Linkage::GetCEntryStubCallDescriptor(
+ graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver,
+ Builtins::name(builtin_index), node->op()->properties(),
+ CallDescriptor::kNeedsFrameState);
+ Node* stub_code =
+ jsgraph()->CEntryStubConstant(1, kDontSaveFPRegs, kArgvOnStack, true);
+ Address builtin_entry = Builtins::CppEntryOf(builtin_index);
+ Node* entry = jsgraph()->ExternalConstant(
+ ExternalReference(builtin_entry, isolate()));
+ Node* argc =
+ jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver);
+ if_false1 = efalse1 = vfalse1 =
+ graph()->NewNode(common()->Call(desc), stub_code, receiver,
+ jsgraph()->PaddingConstant(), argc, target,
+ jsgraph()->UndefinedConstant(), entry, argc, context,
+ frame_state, efalse1, if_false1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ efalse0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+ vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue0, vfalse0, control);
+
+ // Convert the hole to undefined. Do this last, so that the conversion can
+ // often be optimized away via strength reduction.
+ if (IsHoleyElementsKind(kind)) {
+ value =
+ graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
+ }
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
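The inline fast path above handles arrays no longer than JSArray::kMaxCopyElements by loading the first element, copying the remaining elements down by one in a small loop, storing a hole at the old end and decrementing the length; longer arrays fall back to the ArrayShift C++ builtin via a CEntry call. A hedged, standalone C++ model of the fast-path behaviour (ShiftModel is an illustrative name; packed elements are assumed):

    #include <optional>
    #include <vector>

    // Behavioural model of the inlined Array.prototype.shift fast path.
    std::optional<int> ShiftModel(std::vector<int>& elements) {
      if (elements.empty()) return std::nullopt;  // length == 0: return undefined
      int first = elements.front();               // LoadElement(0), returned below
      for (size_t i = 1; i < elements.size(); ++i) {
        elements[i - 1] = elements[i];            // copy loop: StoreElement(i - 1, LoadElement(i))
      }
      elements.pop_back();                        // store a hole at the old end, new length
      return first;
    }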
+// ES6 section 21.1.3.1 String.prototype.charAt ( pos )
+Reduction JSCallReducer::ReduceStringPrototypeCharAt(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* index = jsgraph()->ZeroConstant();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ receiver = effect = graph()->NewNode(simplified()->CheckString(p.feedback()),
+ receiver, effect, control);
+ if (node->op()->ValueInputCount() >= 3) {
+ index = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
+ NodeProperties::GetValueInput(node, 2),
+ effect, control);
+ // Map -0 and NaN to 0 (as per ToInteger), and the values in
+ // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
+ // be considered out-of-bounds as well, because of the maximal
+ // String length limit in V8.
+ STATIC_ASSERT(String::kMaxLength <= kMaxInt);
+ index = graph()->NewNode(simplified()->NumberToUint32(), index);
+ }
+
+ // Determine the {receiver} length.
+ Node* receiver_length =
+ graph()->NewNode(simplified()->StringLength(), receiver);
+
+ // Check if {index} is less than {receiver} length.
+ Node* check =
+ graph()->NewNode(simplified()->NumberLessThan(), index, receiver_length);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ // Return the character from the {receiver} as single character string.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+
+ Node* masked_index = graph()->NewNode(simplified()->MaskIndexWithBound(),
+ index, receiver_length);
+
+ Node* vtrue = graph()->NewNode(simplified()->StringCharAt(), receiver,
+ masked_index, if_true);
+
+ // Return the empty string otherwise.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = jsgraph()->EmptyStringConstant();
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
+Reduction JSCallReducer::ReduceStringPrototypeCharCodeAt(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* index = jsgraph()->ZeroConstant();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ receiver = effect = graph()->NewNode(simplified()->CheckString(p.feedback()),
+ receiver, effect, control);
+ if (node->op()->ValueInputCount() >= 3) {
+ index = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
+ NodeProperties::GetValueInput(node, 2),
+ effect, control);
+
+ // Map -0 and NaN to 0 (as per ToInteger), and the values in
+ // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
+ // be considered out-of-bounds as well, because of the maximal
+ // String length limit in V8.
+ STATIC_ASSERT(String::kMaxLength <= kMaxInt);
+ index = graph()->NewNode(simplified()->NumberToUint32(), index);
+ }
+
+ // Determine the {receiver} length.
+ Node* receiver_length =
+ graph()->NewNode(simplified()->StringLength(), receiver);
+
+ // Check if {index} is less than {receiver} length.
+ Node* check =
+ graph()->NewNode(simplified()->NumberLessThan(), index, receiver_length);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ // Load the character from the {receiver}.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+
+ Node* masked_index = graph()->NewNode(simplified()->MaskIndexWithBound(),
+ index, receiver_length);
+
+ Node* vtrue = graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
+ masked_index, if_true);
+
+ // Return NaN otherwise.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = jsgraph()->NaNConstant();
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
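The NumberToUint32 step above is what makes negative positions land out of bounds: for example charCodeAt(-1) maps -1 to 2^32-1, which is never less than the string length, so the false branch returns NaN. A hedged, standalone C++ model of that index handling (CharCodeAtModel is an illustrative name; a one-byte string and a position that already passed CheckSmi are assumed):

    #include <cstdint>
    #include <limits>
    #include <string>

    // Behavioural model of the index handling in the charCodeAt reduction.
    double CharCodeAtModel(const std::string& receiver, int32_t pos) {
      uint32_t index = static_cast<uint32_t>(pos);  // NumberToUint32: -1 -> 4294967295
      if (index >= receiver.size()) {
        return std::numeric_limits<double>::quiet_NaN();  // out of bounds -> NaN
      }
      return static_cast<unsigned char>(receiver[index]);  // StringCharCodeAt on the masked index
    }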
Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 6e2353c4c1..b2656b6be8 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -15,6 +15,7 @@ namespace internal {
// Forward declarations.
class CompilationDependencies;
class Factory;
+class VectorSlotPair;
namespace compiler {
@@ -24,7 +25,6 @@ class CommonOperatorBuilder;
class JSGraph;
class JSOperatorBuilder;
class SimplifiedOperatorBuilder;
-class VectorSlotPair;
// Performs strength reduction on {JSConstruct} and {JSCall} nodes,
// which might allow inlining or other optimizations to be performed afterwards.
@@ -55,7 +55,6 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceArrayConstructor(Node* node);
Reduction ReduceBooleanConstructor(Node* node);
Reduction ReduceCallApiFunction(Node* node, Handle<JSFunction> function);
- Reduction ReduceNumberConstructor(Node* node);
Reduction ReduceFunctionPrototypeApply(Node* node);
Reduction ReduceFunctionPrototypeBind(Node* node);
Reduction ReduceFunctionPrototypeCall(Node* node);
@@ -73,8 +72,18 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceReflectGetPrototypeOf(Node* node);
Reduction ReduceReflectHas(Node* node);
Reduction ReduceArrayForEach(Handle<JSFunction> function, Node* node);
+ Reduction ReduceArrayReduce(Handle<JSFunction> function, Node* node);
+ Reduction ReduceArrayReduceRight(Handle<JSFunction> function, Node* node);
Reduction ReduceArrayMap(Handle<JSFunction> function, Node* node);
Reduction ReduceArrayFilter(Handle<JSFunction> function, Node* node);
+ enum class ArrayFindVariant : uint8_t { kFind, kFindIndex };
+ Reduction ReduceArrayFind(ArrayFindVariant variant,
+ Handle<JSFunction> function, Node* node);
+ Reduction ReduceArrayEvery(Handle<JSFunction> function, Node* node);
+ Reduction ReduceArraySome(Handle<JSFunction> function, Node* node);
+ Reduction ReduceArrayPrototypePush(Node* node);
+ Reduction ReduceArrayPrototypePop(Node* node);
+ Reduction ReduceArrayPrototypeShift(Node* node);
Reduction ReduceCallOrConstructWithArrayLikeOrSpread(
Node* node, int arity, CallFrequency const& frequency,
VectorSlotPair const& feedback);
@@ -85,6 +94,10 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceJSCallWithArrayLike(Node* node);
Reduction ReduceJSCallWithSpread(Node* node);
Reduction ReduceReturnReceiver(Node* node);
+ Reduction ReduceStringPrototypeIndexOf(Handle<JSFunction> function,
+ Node* node);
+ Reduction ReduceStringPrototypeCharAt(Node* node);
+ Reduction ReduceStringPrototypeCharCodeAt(Node* node);
Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
@@ -107,10 +120,20 @@ class JSCallReducer final : public AdvancedReducer {
Node* effect, Node** check_fail,
Node** control);
+ // Begin the central loop of a higher-order array builtin. A Loop is wired
+ // into {control}, an EffectPhi into {effect}, and the array index {k} is
+ // threaded into a Phi, which is returned. After the call returns, {control}
+ // holds the loop node and {effect} holds the corresponding EffectPhi; it is
+ // helpful to save both.
+ Node* WireInLoopStart(Node* k, Node** control, Node** effect);
+ void WireInLoopEnd(Node* loop, Node* eloop, Node* vloop, Node* k,
+ Node* control, Node* effect);
+
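The definitions of these loop-wiring helpers are not part of this hunk. Judging from the inline loop header at the top of ReduceArraySome above, WireInLoopStart presumably builds something like the following; this is an illustrative sketch under that assumption, not the actual definition:

    // Illustrative sketch only; mirrors the inline loop header in ReduceArraySome.
    Node* JSCallReducer::WireInLoopStart(Node* k, Node** control, Node** effect) {
      Node* loop = *control =
          graph()->NewNode(common()->Loop(2), *control, *control);
      Node* eloop = *effect =
          graph()->NewNode(common()->EffectPhi(2), *effect, *effect, loop);
      Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
      NodeProperties::MergeControlToEnd(graph(), common(), terminate);
      // The returned Phi threads the array index {k} through the loop.
      return graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), k,
                              k, loop);
    }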
// Load receiver[k], first bounding k by receiver array length.
// k is thusly changed, and the effect is changed as well.
Node* SafeLoadElement(ElementsKind kind, Node* receiver, Node* control,
- Node** effect, Node** k);
+ Node** effect, Node** k,
+ const VectorSlotPair& feedback);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 9b0601f8f1..d3b9ee4e70 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -525,7 +525,7 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
// This has to be kept in sync with src/runtime/runtime-array.cc,
// where this limit is protected.
length = effect = graph()->NewNode(
- simplified()->CheckBounds(), length,
+ simplified()->CheckBounds(VectorSlotPair()), length,
jsgraph()->Constant(JSArray::kInitialMaxFastElementArray), effect,
control);
@@ -617,15 +617,16 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node,
if (IsSmiElementsKind(elements_kind)) {
for (auto& value : values) {
if (!NodeProperties::GetType(value)->Is(Type::SignedSmall())) {
- value = effect =
- graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
+ value = effect = graph()->NewNode(
+ simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
}
}
} else if (IsDoubleElementsKind(elements_kind)) {
for (auto& value : values) {
if (!NodeProperties::GetType(value)->Is(Type::Number())) {
- value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
- effect, control);
+ value = effect =
+ graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), value,
+ effect, control);
}
// Make sure we do not store signaling NaNs into double arrays.
value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
@@ -913,6 +914,7 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
DCHECK(!function_map->is_dictionary_map());
// Emit code to allocate the JSFunction instance.
+ STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
AllocationBuilder a(jsgraph(), effect, control);
a.Allocate(function_map->instance_size());
a.Store(AccessBuilder::ForMap(), function_map);
@@ -980,9 +982,9 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
AllocationBuilder aa(jsgraph(), effect, graph()->start());
aa.AllocateArray(2, factory()->fixed_array_map());
aa.Store(AccessBuilder::ForFixedArrayElement(PACKED_ELEMENTS),
- jsgraph()->Constant(0), key);
+ jsgraph()->ZeroConstant(), key);
aa.Store(AccessBuilder::ForFixedArrayElement(PACKED_ELEMENTS),
- jsgraph()->Constant(1), value);
+ jsgraph()->OneConstant(), value);
Node* elements = aa.Finish();
AllocationBuilder a(jsgraph(), elements, graph()->start());
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index d06717717d..c09dcbc1b3 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -251,32 +251,17 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
- Node* context = NodeProperties::GetContextInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- // Load global object from the context.
- Node* native_context = effect =
- graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
- jsgraph()->IntPtrConstant(
- Context::SlotOffset(Context::NATIVE_CONTEXT_INDEX)),
- effect, control);
- Node* global = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), native_context,
- jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
- effect, control);
- NodeProperties::ReplaceEffectInput(node, effect);
- node->InsertInput(zone(), 0, global);
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable =
- CodeFactory::StoreGlobalIC(isolate(), p.language_mode());
+ Builtins::CallableFor(isolate(), Builtins::kStoreGlobalICTrampoline);
ReplaceWithStubCall(node, callable, flags);
} else {
Callable callable =
- CodeFactory::StoreGlobalICInOptimizedCode(isolate(), p.language_mode());
+ Builtins::CallableFor(isolate(), Builtins::kStoreGlobalIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
- node->InsertInput(zone(), 4, vector);
+ node->InsertInput(zone(), 3, vector);
ReplaceWithStubCall(node, callable, flags);
}
}
@@ -708,6 +693,10 @@ void JSGenericLowering::LowerJSGeneratorRestoreContinuation(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
+void JSGenericLowering::LowerJSGeneratorRestoreInputOrDebugPos(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
+
void JSGenericLowering::LowerJSGeneratorRestoreRegister(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 12c610da56..cb3c620117 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -305,10 +305,6 @@ Node* JSGraph::Dead() {
return CACHED(kDead, graph()->NewNode(common()->Dead()));
}
-Node* JSGraph::DeadValue() {
- return CACHED(kDeadValue, graph()->NewNode(common()->DeadValue()));
-}
-
void JSGraph::GetCachedNodes(NodeVector* nodes) {
cache_.GetCachedNodes(nodes);
for (size_t i = 0; i < arraysize(cached_nodes_); i++) {
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index a685fd69a8..f5b4bdc181 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -155,9 +155,6 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
// Create a control node that serves as dependency for dead nodes.
Node* Dead();
- // Sentinel for a value resulting from unreachable computations.
- Node* DeadValue();
-
CommonOperatorBuilder* common() const { return common_; }
JSOperatorBuilder* javascript() const { return javascript_; }
SimplifiedOperatorBuilder* simplified() const { return simplified_; }
@@ -199,7 +196,6 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
kEmptyStateValues,
kSingleDeadTypedStateValues,
kDead,
- kDeadValue,
kNumCachedNodes // Must remain last.
};
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index 9cff51985a..c9909dcb75 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -6,6 +6,7 @@
#include "src/compilation-info.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/simplified-operator.h"
#include "src/objects-inl.h"
@@ -556,6 +557,8 @@ void JSInliningHeuristic::CreateOrReuseDispatch(Node* node, Node* callee,
Node** if_successes,
Node** calls, Node** inputs,
int input_count) {
+ SourcePositionTable::Scope position(
+ source_positions_, source_positions_->GetSourcePosition(node));
if (TryReuseDispatch(node, callee, candidate, if_successes, calls, inputs,
input_count)) {
return;
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index dffa5cfd6a..f4f24f41b4 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -22,6 +22,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
inliner_(editor, local_zone, info, jsgraph, source_positions),
candidates_(local_zone),
seen_(local_zone),
+ source_positions_(source_positions),
jsgraph_(jsgraph) {}
const char* reducer_name() const override { return "JSInliningHeuristic"; }
@@ -85,6 +86,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
JSInliner inliner_;
Candidates candidates_;
ZoneSet<NodeId> seen_;
+ SourcePositionTable* source_positions_;
JSGraph* const jsgraph_;
int cumulative_count_ = 0;
};
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 2322b8ac3a..dc1ec521f2 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -135,7 +135,8 @@ Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
// TODO(bmeurer): Move MergeControlToEnd() to the AdvancedReducer.
Node* deoptimize = graph()->NewNode(
- common()->Deoptimize(DeoptimizeKind::kEager, DeoptimizeReason::kNoReason),
+ common()->Deoptimize(DeoptimizeKind::kEager,
+ DeoptimizeReason::kDeoptimizeNow, VectorSlotPair()),
frame_state, effect, control);
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
Revisit(graph()->end());
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index a6786da157..b2f8c567e2 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -20,6 +20,7 @@
#include "src/feedback-vector.h"
#include "src/field-index-inl.h"
#include "src/isolate-inl.h"
+#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -596,8 +597,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
representation = MachineRepresentation::kTaggedPointer;
} else {
// Check that the {value} is a Smi.
- value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
- effect, control);
+ value = effect = graph()->NewNode(
+ simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
property_cell_value_type = Type::SignedSmall();
representation = MachineRepresentation::kTaggedSigned;
}
@@ -1061,13 +1062,11 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
if (access_mode == AccessMode::kStore) return NoChange();
// Ensure that the {receiver} is actually a String.
- receiver = effect = graph()->NewNode(simplified()->CheckString(), receiver,
- effect, control);
+ receiver = effect = graph()->NewNode(
+ simplified()->CheckString(VectorSlotPair()), receiver, effect, control);
// Determine the {receiver} length.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
- effect, control);
+ Node* length = graph()->NewNode(simplified()->StringLength(), receiver);
// Load the single character string from {receiver} or yield undefined
// if the {index} is out of bounds (depending on the {load_mode}).
@@ -1425,9 +1424,9 @@ Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* frame_state = NodeProperties::FindFrameStateBefore(node);
- Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kSoft, reason),
- frame_state, effect, control);
+ Node* deoptimize = graph()->NewNode(
+ common()->Deoptimize(DeoptimizeKind::kSoft, reason, VectorSlotPair()),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
Revisit(graph()->end());
@@ -1504,7 +1503,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
receiver_map, enumerator);
effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kNoReason), check, effect,
+ simplified()->CheckIf(DeoptimizeReason::kWrongMap), check, effect,
control);
}
@@ -1525,9 +1524,9 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
simplified()->BooleanNot(),
graph()->NewNode(simplified()->ReferenceEqual(), enum_indices,
jsgraph()->EmptyFixedArrayConstant()));
- effect =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
- check, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kWrongEnumIndices), check,
+ effect, control);
// Determine the index from the {enum_indices}.
index = effect = graph()->NewNode(
@@ -1775,7 +1774,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
Node* check =
graph()->NewNode(simplified()->ReferenceEqual(), value, constant_value);
effect =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
+ graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongValue),
check, effect, control);
value = constant_value;
} else if (access_info.IsAccessorConstant()) {
@@ -1809,8 +1808,9 @@ JSNativeContextSpecialization::BuildPropertyStore(
access_mode == AccessMode::kStoreInLiteral);
switch (field_representation) {
case MachineRepresentation::kFloat64: {
- value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
- effect, control);
+ value = effect =
+ graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), value,
+ effect, control);
if (!field_index.is_inobject() || field_index.is_hidden_field() ||
!FLAG_unbox_double_fields) {
if (access_info.HasTransitionMap()) {
@@ -1852,8 +1852,8 @@ JSNativeContextSpecialization::BuildPropertyStore(
Node* check = graph()->NewNode(simplified()->NumberEqual(),
current_value, value);
effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kNoReason), check, effect,
- control);
+ simplified()->CheckIf(DeoptimizeReason::kWrongValue), check,
+ effect, control);
return ValueEffectControl(value, effect, control);
}
break;
@@ -1871,14 +1871,14 @@ JSNativeContextSpecialization::BuildPropertyStore(
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
current_value, value);
effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kNoReason), check, effect,
- control);
+ simplified()->CheckIf(DeoptimizeReason::kWrongValue), check,
+ effect, control);
return ValueEffectControl(value, effect, control);
}
if (field_representation == MachineRepresentation::kTaggedSigned) {
- value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
- effect, control);
+ value = effect = graph()->NewNode(
+ simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
field_access.write_barrier_kind = kNoWriteBarrier;
} else if (field_representation ==
@@ -2007,7 +2007,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
Node* name = NodeProperties::GetValueInput(node, 1);
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), name,
jsgraph()->HeapConstant(cached_name));
- effect = graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
+ effect = graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongName),
check, effect, control);
Node* value = NodeProperties::GetValueInput(node, 2);
@@ -2127,13 +2127,14 @@ JSNativeContextSpecialization::BuildElementAccess(
// Check that the {index} is a valid array index, we do the actual
// bounds check below and just skip the store below if it's out of
// bounds for the {receiver}.
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- jsgraph()->Constant(Smi::kMaxValue),
- effect, control);
+ index = effect = graph()->NewNode(
+ simplified()->CheckBounds(VectorSlotPair()), index,
+ jsgraph()->Constant(Smi::kMaxValue), effect, control);
} else {
// Check that the {index} is in the valid range for the {receiver}.
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- length, effect, control);
+ index = effect =
+ graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
+ length, effect, control);
}
// Access the actual element.
@@ -2279,13 +2280,14 @@ JSNativeContextSpecialization::BuildElementAccess(
// Check that the {index} is a valid array index, we do the actual
// bounds check below and just skip the store below if it's out of
// bounds for the {receiver}.
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- jsgraph()->Constant(Smi::kMaxValue),
- effect, control);
+ index = effect = graph()->NewNode(
+ simplified()->CheckBounds(VectorSlotPair()), index,
+ jsgraph()->Constant(Smi::kMaxValue), effect, control);
} else {
// Check that the {index} is in the valid range for the {receiver}.
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- length, effect, control);
+ index = effect =
+ graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
+ length, effect, control);
}
// Compute the element access.
@@ -2406,11 +2408,12 @@ JSNativeContextSpecialization::BuildElementAccess(
} else {
DCHECK_EQ(AccessMode::kStore, access_mode);
if (IsSmiElementsKind(elements_kind)) {
- value = effect =
- graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
+ value = effect = graph()->NewNode(
+ simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
} else if (IsDoubleElementsKind(elements_kind)) {
- value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
- effect, control);
+ value = effect =
+ graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), value,
+ effect, control);
// Make sure we do not store signalling NaNs into double arrays.
value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
}
@@ -2443,8 +2446,9 @@ JSNativeContextSpecialization::BuildElementAccess(
jsgraph()->Constant(JSObject::kMaxGap))
: graph()->NewNode(simplified()->NumberAdd(), length,
jsgraph()->OneConstant());
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- limit, effect, control);
+ index = effect =
+ graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
+ limit, effect, control);
// Grow {elements} backing store if necessary.
GrowFastElementsMode mode =
@@ -2452,8 +2456,8 @@ JSNativeContextSpecialization::BuildElementAccess(
? GrowFastElementsMode::kDoubleElements
: GrowFastElementsMode::kSmiOrObjectElements;
elements = effect = graph()->NewNode(
- simplified()->MaybeGrowFastElements(mode), receiver, elements,
- index, elements_length, effect, control);
+ simplified()->MaybeGrowFastElements(mode, VectorSlotPair()),
+ receiver, elements, index, elements_length, effect, control);
// Also update the "length" property if {receiver} is a JSArray.
if (receiver_is_jsarray) {
@@ -2505,9 +2509,9 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
dependencies()->AssumePropertyCell(factory()->no_elements_protector());
// Ensure that the {index} is a valid String length.
- index = *effect = graph()->NewNode(simplified()->CheckBounds(), index,
- jsgraph()->Constant(String::kMaxLength),
- *effect, *control);
+ index = *effect = graph()->NewNode(
+ simplified()->CheckBounds(VectorSlotPair()), index,
+ jsgraph()->Constant(String::kMaxLength), *effect, *control);
// Load the single character string from {receiver} or yield
// undefined if the {index} is not within the valid bounds.
@@ -2531,8 +2535,9 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
vtrue, vfalse, *control);
} else {
// Ensure that {index} is less than {receiver} length.
- index = *effect = graph()->NewNode(simplified()->CheckBounds(), index,
- length, *effect, *control);
+ index = *effect =
+ graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
+ length, *effect, *control);
Node* masked_index =
graph()->NewNode(simplified()->MaskIndexWithBound(), index, length);
@@ -2579,8 +2584,8 @@ Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
common()->Select(MachineRepresentation::kTaggedSigned),
graph()->NewNode(simplified()->ObjectIsSmi(), properties), properties,
jsgraph()->SmiConstant(PropertyArray::kNoHashSentinel));
- hash = graph()->NewNode(common()->TypeGuard(Type::SignedSmall()), hash,
- control);
+ hash = effect = graph()->NewNode(common()->TypeGuard(Type::SignedSmall()),
+ hash, effect, control);
hash =
graph()->NewNode(simplified()->NumberShiftLeft(), hash,
jsgraph()->Constant(PropertyArray::HashField::kShift));
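
Editor's note: the recurring change in the hunks above is that every checking operator (CheckBounds, CheckSmi, CheckNumber, CheckIf) now carries a VectorSlotPair, so a later deoptimization can be attributed to a concrete feedback slot; an empty pair means no feedback is updated. A minimal sketch of the pattern, using the V8 compiler types exactly as they appear in this patch (the helper name BuildCheckedIndex is hypothetical):

    // Bounds-check helper in the style used throughout this file after the
    // change: the check deoptimizes when index is not in [0, length), and the
    // (here empty) VectorSlotPair tells the deoptimizer which feedback slot,
    // if any, should be updated.
    Node* BuildCheckedIndex(Graph* graph, SimplifiedOperatorBuilder* simplified,
                            Node* index, Node* length, Node** effect,
                            Node* control) {
      return *effect = graph->NewNode(simplified->CheckBounds(VectorSlotPair()),
                                      index, length, *effect, control);
    }
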
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 5b5e6589d2..0ddf859cff 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -9,9 +9,9 @@
#include "src/base/lazy-instance.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
-#include "src/feedback-vector.h"
#include "src/handles-inl.h"
#include "src/objects-inl.h"
+#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -28,29 +28,6 @@ CallFrequency CallFrequencyOf(Operator const* op) {
return OpParameter<CallFrequency>(op);
}
-VectorSlotPair::VectorSlotPair() {}
-
-
-int VectorSlotPair::index() const {
- return vector_.is_null() ? -1 : FeedbackVector::GetIndex(slot_);
-}
-
-
-bool operator==(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
- return lhs.slot() == rhs.slot() &&
- lhs.vector().location() == rhs.vector().location();
-}
-
-
-bool operator!=(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
- return !(lhs == rhs);
-}
-
-
-size_t hash_value(VectorSlotPair const& p) {
- return base::hash_combine(p.slot(), p.vector().location());
-}
-
std::ostream& operator<<(std::ostream& os,
ConstructForwardVarargsParameters const& p) {
@@ -599,6 +576,7 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
V(LoadMessage, Operator::kNoThrow | Operator::kNoWrite, 0, 1) \
V(StoreMessage, Operator::kNoRead | Operator::kNoThrow, 1, 0) \
V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
+ V(GeneratorRestoreInputOrDebugPos, Operator::kNoThrow, 1, 1) \
V(StackCheck, Operator::kNoWrite, 0, 0) \
V(Debugger, Operator::kNoProperties, 0, 0) \
V(GetSuperConstructor, Operator::kNoWrite, 1, 1)
@@ -645,6 +623,7 @@ struct JSOperatorGlobalCache final {
Name##Operator<BinaryOperationHint::kNumberOrOddball> \
k##Name##NumberOrOddballOperator; \
Name##Operator<BinaryOperationHint::kString> k##Name##StringOperator; \
+ Name##Operator<BinaryOperationHint::kBigInt> k##Name##BigIntOperator; \
Name##Operator<BinaryOperationHint::kAny> k##Name##AnyOperator;
BINARY_OP_LIST(BINARY_OP)
#undef BINARY_OP
@@ -667,6 +646,7 @@ struct JSOperatorGlobalCache final {
k##Name##InternalizedStringOperator; \
Name##Operator<CompareOperationHint::kString> k##Name##StringOperator; \
Name##Operator<CompareOperationHint::kSymbol> k##Name##SymbolOperator; \
+ Name##Operator<CompareOperationHint::kBigInt> k##Name##BigIntOperator; \
Name##Operator<CompareOperationHint::kReceiver> k##Name##ReceiverOperator; \
Name##Operator<CompareOperationHint::kAny> k##Name##AnyOperator;
COMPARE_OP_LIST(COMPARE_OP)
@@ -703,6 +683,8 @@ CACHED_OP_LIST(CACHED_OP)
return &cache_.k##Name##NumberOrOddballOperator; \
case BinaryOperationHint::kString: \
return &cache_.k##Name##StringOperator; \
+ case BinaryOperationHint::kBigInt: \
+ return &cache_.k##Name##BigIntOperator; \
case BinaryOperationHint::kAny: \
return &cache_.k##Name##AnyOperator; \
} \
@@ -729,6 +711,8 @@ BINARY_OP_LIST(BINARY_OP)
return &cache_.k##Name##StringOperator; \
case CompareOperationHint::kSymbol: \
return &cache_.k##Name##SymbolOperator; \
+ case CompareOperationHint::kBigInt: \
+ return &cache_.k##Name##BigIntOperator; \
case CompareOperationHint::kReceiver: \
return &cache_.k##Name##ReceiverOperator; \
case CompareOperationHint::kAny: \
@@ -763,8 +747,10 @@ const Operator* JSOperatorBuilder::CallForwardVarargs(size_t arity,
const Operator* JSOperatorBuilder::Call(size_t arity, CallFrequency frequency,
VectorSlotPair const& feedback,
- ConvertReceiverMode convert_mode) {
- CallParameters parameters(arity, frequency, feedback, convert_mode);
+ ConvertReceiverMode convert_mode,
+ SpeculationMode speculation_mode) {
+ CallParameters parameters(arity, frequency, feedback, convert_mode,
+ speculation_mode);
return new (zone()) Operator1<CallParameters>( // --
IrOpcode::kJSCall, Operator::kNoProperties, // opcode
"JSCall", // name
@@ -781,9 +767,10 @@ const Operator* JSOperatorBuilder::CallWithArrayLike(CallFrequency frequency) {
}
const Operator* JSOperatorBuilder::CallWithSpread(
- uint32_t arity, CallFrequency frequency, VectorSlotPair const& feedback) {
+ uint32_t arity, CallFrequency frequency, VectorSlotPair const& feedback,
+ SpeculationMode speculation_mode) {
CallParameters parameters(arity, frequency, feedback,
- ConvertReceiverMode::kAny);
+ ConvertReceiverMode::kAny, speculation_mode);
return new (zone()) Operator1<CallParameters>( // --
IrOpcode::kJSCallWithSpread, Operator::kNoProperties, // opcode
"JSCallWithSpread", // name
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 94a9b1fdb6..3875234d5a 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -10,6 +10,7 @@
#include "src/handles.h"
#include "src/runtime/runtime.h"
#include "src/type-hints.h"
+#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -18,7 +19,6 @@ class AllocationSite;
class BoilerplateDescription;
class ConstantElementsPair;
class SharedFunctionInfo;
-class FeedbackVector;
namespace compiler {
@@ -59,32 +59,6 @@ std::ostream& operator<<(std::ostream&, CallFrequency);
CallFrequency CallFrequencyOf(Operator const* op) WARN_UNUSED_RESULT;
-// Defines a pair of {FeedbackVector} and {FeedbackSlot}, which
-// is used to access the type feedback for a certain {Node}.
-class V8_EXPORT_PRIVATE VectorSlotPair {
- public:
- VectorSlotPair();
- VectorSlotPair(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : vector_(vector), slot_(slot) {}
-
- bool IsValid() const { return !vector_.is_null() && !slot_.IsInvalid(); }
-
- Handle<FeedbackVector> vector() const { return vector_; }
- FeedbackSlot slot() const { return slot_; }
-
- int index() const;
-
- private:
- const Handle<FeedbackVector> vector_;
- const FeedbackSlot slot_;
-};
-
-bool operator==(VectorSlotPair const&, VectorSlotPair const&);
-bool operator!=(VectorSlotPair const&, VectorSlotPair const&);
-
-size_t hash_value(VectorSlotPair const&);
-
-
// Defines the flags for a JavaScript call forwarding parameters. This
// is used as parameter by JSConstructForwardVarargs operators.
class ConstructForwardVarargsParameters final {
@@ -187,8 +161,10 @@ class CallParameters final {
public:
CallParameters(size_t arity, CallFrequency frequency,
VectorSlotPair const& feedback,
- ConvertReceiverMode convert_mode)
+ ConvertReceiverMode convert_mode,
+ SpeculationMode speculation_mode)
: bit_field_(ArityField::encode(arity) |
+ SpeculationModeField::encode(speculation_mode) |
ConvertReceiverModeField::encode(convert_mode)),
frequency_(frequency),
feedback_(feedback) {}
@@ -200,6 +176,10 @@ class CallParameters final {
}
VectorSlotPair const& feedback() const { return feedback_; }
+ SpeculationMode speculation_mode() const {
+ return SpeculationModeField::decode(bit_field_);
+ }
+
bool operator==(CallParameters const& that) const {
return this->bit_field_ == that.bit_field_ &&
this->frequency_ == that.frequency_ &&
@@ -212,7 +192,8 @@ class CallParameters final {
return base::hash_combine(p.bit_field_, p.frequency_, p.feedback_);
}
- typedef BitField<size_t, 0, 29> ArityField;
+ typedef BitField<size_t, 0, 28> ArityField;
+ typedef BitField<SpeculationMode, 28, 1> SpeculationModeField;
typedef BitField<ConvertReceiverMode, 29, 2> ConvertReceiverModeField;
uint32_t const bit_field_;
@@ -693,11 +674,13 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* Call(
size_t arity, CallFrequency frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair(),
- ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny);
+ ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
+ SpeculationMode speculation_mode = SpeculationMode::kAllowSpeculation);
const Operator* CallWithArrayLike(CallFrequency frequency);
const Operator* CallWithSpread(
uint32_t arity, CallFrequency frequency = CallFrequency(),
- VectorSlotPair const& feedback = VectorSlotPair());
+ VectorSlotPair const& feedback = VectorSlotPair(),
+ SpeculationMode speculation_mode = SpeculationMode::kAllowSpeculation);
const Operator* CallRuntime(Runtime::FunctionId id);
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
@@ -761,8 +744,9 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
// Used to implement Ignition's RestoreGeneratorState bytecode.
const Operator* GeneratorRestoreContinuation();
- // Used to implement Ignition's RestoreGeneratorRegisters bytecode.
+ // Used to implement Ignition's ResumeGenerator bytecode.
const Operator* GeneratorRestoreRegister(int index);
+ const Operator* GeneratorRestoreInputOrDebugPos();
const Operator* StackCheck();
const Operator* Debugger();
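
Editor's note: the CallParameters change above shrinks ArityField by one bit to make room for the speculation mode. A standalone sketch (plain C++, not V8 code) of the resulting 32-bit layout, useful for checking that the three fields do not overlap:

    #include <cassert>
    #include <cstdint>

    // Bits 0..27: arity, bit 28: speculation mode, bits 29..30: receiver mode.
    constexpr uint32_t EncodeCallBits(uint32_t arity, uint32_t speculation_mode,
                                      uint32_t convert_mode) {
      return (arity & ((1u << 28) - 1)) | (speculation_mode << 28) |
             (convert_mode << 29);
    }

    int main() {
      uint32_t bits = EncodeCallBits(/*arity=*/5, /*speculation_mode=*/1,
                                     /*convert_mode=*/2);
      assert((bits & ((1u << 28) - 1)) == 5);  // ArityField
      assert(((bits >> 28) & 1u) == 1);        // SpeculationModeField
      assert(((bits >> 29) & 3u) == 2);        // ConvertReceiverModeField
      return 0;
    }
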
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index a7ce12cdb4..0ec63600a2 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -38,6 +38,7 @@ bool BinaryOperationHintToNumberOperationHint(
case BinaryOperationHint::kAny:
case BinaryOperationHint::kNone:
case BinaryOperationHint::kString:
+ case BinaryOperationHint::kBigInt:
break;
}
return false;
@@ -90,6 +91,7 @@ class JSSpeculativeBinopBuilder final {
case CompareOperationHint::kNone:
case CompareOperationHint::kString:
case CompareOperationHint::kSymbol:
+ case CompareOperationHint::kBigInt:
case CompareOperationHint::kReceiver:
case CompareOperationHint::kInternalizedString:
break;
@@ -493,7 +495,8 @@ Node* JSTypeHintLowering::TryBuildSoftDeopt(FeedbackNexus& nexus, Node* effect,
DeoptimizeReason reason) const {
if ((flags() & kBailoutOnUninitialized) && nexus.IsUninitialized()) {
Node* deoptimize = jsgraph()->graph()->NewNode(
- jsgraph()->common()->Deoptimize(DeoptimizeKind::kSoft, reason),
+ jsgraph()->common()->Deoptimize(DeoptimizeKind::kSoft, reason,
+ VectorSlotPair()),
jsgraph()->Dead(), effect, control);
Node* frame_state = NodeProperties::FindFrameStateBefore(deoptimize);
deoptimize->ReplaceInput(0, frame_state);
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 2380c7c0f4..c265caf9f0 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -47,6 +47,7 @@ class JSBinopReduction final {
case CompareOperationHint::kNone:
case CompareOperationHint::kString:
case CompareOperationHint::kSymbol:
+ case CompareOperationHint::kBigInt:
case CompareOperationHint::kReceiver:
case CompareOperationHint::kInternalizedString:
break;
@@ -156,14 +157,16 @@ class JSBinopReduction final {
// CheckString node.
void CheckInputsToString() {
if (!left_type()->Is(Type::String())) {
- Node* left_input = graph()->NewNode(simplified()->CheckString(), left(),
- effect(), control());
+ Node* left_input =
+ graph()->NewNode(simplified()->CheckString(VectorSlotPair()), left(),
+ effect(), control());
node_->ReplaceInput(0, left_input);
update_effect(left_input);
}
if (!right_type()->Is(Type::String())) {
- Node* right_input = graph()->NewNode(simplified()->CheckString(), right(),
- effect(), control());
+ Node* right_input =
+ graph()->NewNode(simplified()->CheckString(VectorSlotPair()), right(),
+ effect(), control());
node_->ReplaceInput(1, right_input);
update_effect(right_input);
}
@@ -308,7 +311,8 @@ class JSBinopReduction final {
case IrOpcode::kSpeculativeNumberLessThanOrEqual:
return simplified()->NumberLessThanOrEqual();
case IrOpcode::kSpeculativeNumberAdd:
- return simplified()->NumberAdd();
+ // Handled by ReduceSpeculativeNumberAdd.
+ UNREACHABLE();
case IrOpcode::kSpeculativeNumberSubtract:
return simplified()->NumberSubtract();
case IrOpcode::kSpeculativeNumberMultiply:
@@ -539,13 +543,15 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
if (r.LeftInputIs(empty_string_type_)) {
- Node* value = effect = graph()->NewNode(simplified()->CheckString(),
- r.right(), effect, control);
+ Node* value = effect =
+ graph()->NewNode(simplified()->CheckString(VectorSlotPair()),
+ r.right(), effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
} else if (r.RightInputIs(empty_string_type_)) {
- Node* value = effect = graph()->NewNode(simplified()->CheckString(),
- r.left(), effect, control);
+ Node* value = effect =
+ graph()->NewNode(simplified()->CheckString(VectorSlotPair()),
+ r.left(), effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -594,6 +600,9 @@ Reduction JSTypedLowering::ReduceSpeculativeNumberBinop(Node* node) {
if ((hint == NumberOperationHint::kNumber ||
hint == NumberOperationHint::kNumberOrOddball) &&
r.BothInputsAre(Type::NumberOrUndefinedOrNullOrBoolean())) {
+ // We intentionally do this only in the Number and NumberOrOddball hint case
+ // because simplified lowering of these speculative ops may do some clever
+ // reductions in the other cases.
r.ConvertInputsToNumber();
return r.ChangeToPureOperator(r.NumberOpFromSpeculativeNumberOp(),
Type::Number());
@@ -634,22 +643,22 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
// Make sure {first} is actually a String.
Type* first_type = NodeProperties::GetType(first);
if (!first_type->Is(Type::String())) {
- first = effect =
- graph()->NewNode(simplified()->CheckString(), first, effect, control);
+ first = effect = graph()->NewNode(
+ simplified()->CheckString(VectorSlotPair()), first, effect, control);
first_type = NodeProperties::GetType(first);
}
// Make sure {second} is actually a String.
Type* second_type = NodeProperties::GetType(second);
if (!second_type->Is(Type::String())) {
- second = effect =
- graph()->NewNode(simplified()->CheckString(), second, effect, control);
+ second = effect = graph()->NewNode(
+ simplified()->CheckString(VectorSlotPair()), second, effect, control);
second_type = NodeProperties::GetType(second);
}
// Determine the {first} length.
- Node* first_length = BuildGetStringLength(first, &effect, control);
- Node* second_length = BuildGetStringLength(second, &effect, control);
+ Node* first_length = BuildGetStringLength(first);
+ Node* second_length = BuildGetStringLength(second);
// Compute the resulting length.
Node* length =
@@ -661,9 +670,9 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
// has the additional benefit of not holding on to the lazy {frame_state}
// and thus potentially reduces the number of live ranges and allows for
// more truncations.
- length = effect = graph()->NewNode(simplified()->CheckBounds(), length,
- jsgraph()->Constant(String::kMaxLength),
- effect, control);
+ length = effect = graph()->NewNode(
+ simplified()->CheckBounds(VectorSlotPair()), length,
+ jsgraph()->Constant(String::kMaxLength), effect, control);
} else {
// Check if we would overflow the allowed maximum string length.
Node* check =
@@ -698,40 +707,25 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
Revisit(graph()->end());
}
control = graph()->NewNode(common()->IfTrue(), branch);
+ length = effect =
+ graph()->NewNode(common()->TypeGuard(type_cache_.kStringLengthType),
+ length, effect, control);
}
- // Figure out the map for the resulting ConsString.
- // TODO(turbofan): We currently just use the cons_string_map here for
- // the sake of simplicity; we could also try to be smarter here and
- // use the one_byte_cons_string_map instead when the resulting ConsString
- // contains only one byte characters.
- Node* value_map = jsgraph()->HeapConstant(factory()->cons_string_map());
-
- // Allocate the resulting ConsString.
- AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(ConsString::kSize, NOT_TENURED, Type::OtherString());
- a.Store(AccessBuilder::ForMap(), value_map);
- a.Store(AccessBuilder::ForNameHashField(),
- jsgraph()->Constant(Name::kEmptyHashField));
- a.Store(AccessBuilder::ForStringLength(), length);
- a.Store(AccessBuilder::ForConsStringFirst(), first);
- a.Store(AccessBuilder::ForConsStringSecond(), second);
-
- // Morph the {node} into a {FinishRegion}.
- ReplaceWithValue(node, node, node, control);
- a.FinishAndChange(node);
- return Changed(node);
+ Node* value =
+ graph()->NewNode(simplified()->NewConsString(), length, first, second);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
-Node* JSTypedLowering::BuildGetStringLength(Node* value, Node** effect,
- Node* control) {
+Node* JSTypedLowering::BuildGetStringLength(Node* value) {
+ // TODO(bmeurer): Get rid of this hack and instead have a way to
+ // express the string length in the types.
HeapObjectMatcher m(value);
Node* length =
(m.HasValue() && m.Value()->IsString())
? jsgraph()->Constant(Handle<String>::cast(m.Value())->length())
- : (*effect) = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()),
- value, *effect, control);
+ : graph()->NewNode(simplified()->StringLength(), value);
return length;
}
@@ -866,9 +860,9 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node) {
ReplaceWithValue(node, replacement);
return Replace(replacement);
}
- if (r.OneInputCannotBe(Type::NumberOrString())) {
- // For values with canonical representation (i.e. neither String, nor
- // Number) an empty type intersection means the values cannot be strictly
+ if (r.OneInputCannotBe(Type::NumericOrString())) {
+ // For values with canonical representation (i.e. neither String nor
+ // Numeric) an empty type intersection means the values cannot be strictly
// equal.
if (!r.left_type()->Maybe(r.right_type())) {
Node* replacement = jsgraph()->FalseConstant();
@@ -1015,6 +1009,7 @@ Reduction JSTypedLowering::ReduceJSToNumberOrNumeric(Node* node) {
NodeProperties::ChangeOp(node, simplified()->PlainPrimitiveToNumber());
return Changed(node);
}
+ // TODO(neis): Reduce ToNumeric to ToNumber if input can't be BigInt?
return NoChange();
}
@@ -1051,7 +1046,9 @@ Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
return Replace(jsgraph()->HeapConstant(
factory()->NumberToString(factory()->NewNumber(input_type->Min()))));
}
- // TODO(turbofan): js-typed-lowering of ToString(x:number)
+ if (input_type->Is(Type::Number())) {
+ return Replace(graph()->NewNode(simplified()->NumberToString(), input));
+ }
return NoChange();
}
@@ -1133,16 +1130,12 @@ Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 0);
Type* receiver_type = NodeProperties::GetType(receiver);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
Handle<Name> name = NamedAccessOf(node->op()).name();
// Optimize "length" property of strings.
if (name.is_identical_to(factory()->length_string()) &&
receiver_type->Is(Type::String())) {
- Node* value = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
- effect, control);
- ReplaceWithValue(node, value, effect);
+ Node* value = graph()->NewNode(simplified()->StringLength(), receiver);
+ ReplaceWithValue(node, value);
return Replace(value);
}
return NoChange();
@@ -1783,7 +1776,7 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
receiver_map, cache_type);
effect =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
+ graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongMap),
check, effect, control);
// Since the change to LoadElement() below is effectful, we connect
@@ -2098,6 +2091,22 @@ Reduction JSTypedLowering::ReduceJSGeneratorRestoreRegister(Node* node) {
return Changed(element);
}
+Reduction JSTypedLowering::ReduceJSGeneratorRestoreInputOrDebugPos(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSGeneratorRestoreInputOrDebugPos, node->opcode());
+
+ FieldAccess input_or_debug_pos_field =
+ AccessBuilder::ForJSGeneratorObjectInputOrDebugPos();
+ const Operator* new_op = simplified()->LoadField(input_or_debug_pos_field);
+
+ // Mutate the node in-place.
+ DCHECK(OperatorProperties::HasContextInput(node->op()));
+ DCHECK(!OperatorProperties::HasContextInput(new_op));
+ node->RemoveInput(NodeProperties::FirstContextIndex(node));
+
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+}
+
Reduction JSTypedLowering::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSEqual:
@@ -2183,6 +2192,8 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSGeneratorRestoreContinuation(node);
case IrOpcode::kJSGeneratorRestoreRegister:
return ReduceJSGeneratorRestoreRegister(node);
+ case IrOpcode::kJSGeneratorRestoreInputOrDebugPos:
+ return ReduceJSGeneratorRestoreInputOrDebugPos(node);
// TODO(mstarzinger): Simplified operations hiding in JS-level reducer not
// fooling anyone. Consider moving this into a separate reducer.
case IrOpcode::kSpeculativeNumberAdd:
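
Editor's note: two related simplifications run through the js-typed-lowering.cc hunks above. String length is now read through the pure StringLength operator (a string's length never changes, so no effect input is needed), and ReduceCreateConsString emits a dedicated NewConsString node instead of allocating the ConsString inline. A condensed sketch of the new lowering shape, with names as they appear in the hunks:

    // Lengths are pure nodes now; BuildGetStringLength constant-folds when the
    // input is a known string constant, otherwise it emits StringLength.
    Node* first_length = BuildGetStringLength(first);
    Node* second_length = BuildGetStringLength(second);

    // The allocation and field stores are folded into one simplified operator,
    // which later phases can lower or optimize as a unit.
    Node* value =
        graph()->NewNode(simplified()->NewConsString(), length, first, second);
    ReplaceWithValue(node, value, effect, control);
    return Replace(value);
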
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 8b00c1d32c..d72303f495 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -73,6 +73,7 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSGeneratorStore(Node* node);
Reduction ReduceJSGeneratorRestoreContinuation(Node* node);
Reduction ReduceJSGeneratorRestoreRegister(Node* node);
+ Reduction ReduceJSGeneratorRestoreInputOrDebugPos(Node* node);
Reduction ReduceNumberBinop(Node* node);
Reduction ReduceInt32Binop(Node* node);
Reduction ReduceUI32Shift(Node* node, Signedness signedness);
@@ -85,8 +86,8 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
// Helper for ReduceJSLoadModule and ReduceJSStoreModule.
Node* BuildGetModuleCell(Node* node);
- // Helpers for ReduceJSCreateConsString and ReduceJSStringConcat.
- Node* BuildGetStringLength(Node* value, Node** effect, Node* control);
+ // Helpers for ReduceJSCreateConsString.
+ Node* BuildGetStringLength(Node* value);
Factory* factory() const;
Graph* graph() const;
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 03b8074f0f..5df50e64f5 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -75,33 +75,42 @@ bool CallDescriptor::HasSameReturnLocationsAs(
return true;
}
-int CallDescriptor::GetStackParameterDelta(
- CallDescriptor const* tail_caller) const {
- int callee_slots_above_sp = 0;
+int CallDescriptor::GetFirstUnusedStackSlot() const {
+ int slots_above_sp = 0;
for (size_t i = 0; i < InputCount(); ++i) {
LinkageLocation operand = GetInputLocation(i);
if (!operand.IsRegister()) {
int new_candidate =
-operand.GetLocation() + operand.GetSizeInPointers() - 1;
- if (new_candidate > callee_slots_above_sp) {
- callee_slots_above_sp = new_candidate;
+ if (new_candidate > slots_above_sp) {
+ slots_above_sp = new_candidate;
}
}
}
- int tail_caller_slots_above_sp = 0;
- if (tail_caller != nullptr) {
- for (size_t i = 0; i < tail_caller->InputCount(); ++i) {
- LinkageLocation operand = tail_caller->GetInputLocation(i);
- if (!operand.IsRegister()) {
- int new_candidate =
- -operand.GetLocation() + operand.GetSizeInPointers() - 1;
- if (new_candidate > tail_caller_slots_above_sp) {
- tail_caller_slots_above_sp = new_candidate;
- }
+ return slots_above_sp;
+}
+
+int CallDescriptor::GetStackParameterDelta(
+ CallDescriptor const* tail_caller) const {
+ int callee_slots_above_sp = GetFirstUnusedStackSlot();
+ int tail_caller_slots_above_sp = tail_caller->GetFirstUnusedStackSlot();
+ int stack_param_delta = callee_slots_above_sp - tail_caller_slots_above_sp;
+ if (kPadArguments) {
+ // Adjust stack delta when it is odd.
+ if (stack_param_delta % 2 != 0) {
+ if (callee_slots_above_sp % 2 != 0) {
+ // The delta is odd due to the callee - we will need to add one slot
+ // of padding.
+ ++stack_param_delta;
+ } else {
+ // The delta is odd because of the caller. We already have one slot of
+ // padding that we can reuse for arguments, so we will need one fewer
+ // slot.
+ --stack_param_delta;
}
}
}
- return callee_slots_above_sp - tail_caller_slots_above_sp;
+ return stack_param_delta;
}
bool CallDescriptor::CanTailCall(const Node* node) const {
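
Editor's note: GetStackParameterDelta now derives both slot counts from GetFirstUnusedStackSlot and then fixes up the parity when the target pads argument slots. A standalone sketch (plain C++, with kPadArguments modeled as a bool) of that adjustment, checking the two odd-delta cases described in the comments above:

    #include <cassert>

    int StackParameterDelta(int callee_slots, int caller_slots,
                            bool pad_arguments) {
      int delta = callee_slots - caller_slots;
      if (pad_arguments && delta % 2 != 0) {
        // Odd delta: either the callee needs one extra padding slot, or the
        // tail caller already has one that the callee can reuse.
        delta += (callee_slots % 2 != 0) ? 1 : -1;
      }
      return delta;
    }

    int main() {
      assert(StackParameterDelta(3, 2, true) == 2);   // callee is odd: pad
      assert(StackParameterDelta(2, 1, true) == 0);   // caller is odd: reuse
      assert(StackParameterDelta(4, 2, true) == 2);   // even delta: unchanged
      assert(StackParameterDelta(3, 2, false) == 1);  // no padding required
      return 0;
    }
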
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 9e79a9af00..ade1d6902f 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -177,17 +177,14 @@ class V8_EXPORT_PRIVATE CallDescriptor final
kNeedsFrameState = 1u << 0,
kHasExceptionHandler = 1u << 1,
kCanUseRoots = 1u << 2,
- // (arm64 only) native stack should be used for arguments.
- kUseNativeStack = 1u << 3,
- // (arm64 only) call instruction has to restore JSSP or CSP.
- kRestoreJSSP = 1u << 4,
- kRestoreCSP = 1u << 5,
// Causes the code generator to initialize the root register.
- kInitializeRootRegister = 1u << 6,
+ kInitializeRootRegister = 1u << 3,
// Does not ever try to allocate space on our heap.
- kNoAllocate = 1u << 7,
+ kNoAllocate = 1u << 4,
// Push argument count as part of function prologue.
- kPushArgumentCount = 1u << 8
+ kPushArgumentCount = 1u << 5,
+ // Use retpoline for this call if indirect.
+ kRetpoline = 1u << 6
};
typedef base::Flags<Flag> Flags;
@@ -197,12 +194,14 @@ class V8_EXPORT_PRIVATE CallDescriptor final
RegList callee_saved_registers,
RegList callee_saved_fp_registers, Flags flags,
const char* debug_name = "",
- const RegList allocatable_registers = 0)
+ const RegList allocatable_registers = 0,
+ size_t stack_return_count = 0)
: kind_(kind),
target_type_(target_type),
target_loc_(target_loc),
location_sig_(location_sig),
stack_param_count_(stack_param_count),
+ stack_return_count_(stack_return_count),
properties_(properties),
callee_saved_registers_(callee_saved_registers),
callee_saved_fp_registers_(callee_saved_fp_registers),
@@ -232,6 +231,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final
// The number of stack parameters to the call.
size_t StackParameterCount() const { return stack_param_count_; }
+ // The number of stack return values from the call.
+ size_t StackReturnCount() const { return stack_return_count_; }
+
// The number of parameters to the JS function call.
size_t JSParameterCount() const {
DCHECK(IsJSFunctionCall());
@@ -248,7 +250,6 @@ class V8_EXPORT_PRIVATE CallDescriptor final
Flags flags() const { return flags_; }
bool NeedsFrameState() const { return flags() & kNeedsFrameState; }
- bool UseNativeStack() const { return flags() & kUseNativeStack; }
bool PushArgumentCount() const { return flags() & kPushArgumentCount; }
bool InitializeRootRegister() const {
return flags() & kInitializeRootRegister;
@@ -293,7 +294,10 @@ class V8_EXPORT_PRIVATE CallDescriptor final
bool HasSameReturnLocationsAs(const CallDescriptor* other) const;
- int GetStackParameterDelta(const CallDescriptor* tail_caller = nullptr) const;
+ // Returns the first stack slot that is not used by the stack parameters.
+ int GetFirstUnusedStackSlot() const;
+
+ int GetStackParameterDelta(const CallDescriptor* tail_caller) const;
bool CanTailCall(const Node* call) const;
@@ -318,6 +322,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
const LinkageLocation target_loc_;
const LocationSignature* const location_sig_;
const size_t stack_param_count_;
+ const size_t stack_return_count_;
const Operator::Properties properties_;
const RegList callee_saved_registers_;
const RegList callee_saved_fp_registers_;
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 0313e57909..7888f5a21e 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -811,12 +811,12 @@ Reduction LoadElimination::ReduceEnsureWritableFastElements(Node* node) {
}
Reduction LoadElimination::ReduceMaybeGrowFastElements(Node* node) {
- GrowFastElementsMode mode = GrowFastElementsModeOf(node->op());
+ GrowFastElementsParameters params = GrowFastElementsParametersOf(node->op());
Node* const object = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- if (mode == GrowFastElementsMode::kDoubleElements) {
+ if (params.mode() == GrowFastElementsMode::kDoubleElements) {
// We know that the resulting elements have the fixed double array map.
state = state->SetMaps(
node, ZoneHandleSet<Map>(factory()->fixed_double_array_map()), zone());
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index a9cd46d975..d6b88b13f5 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -14,7 +14,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define OFFSET(x) ((x)&0x1f)
+#define OFFSET(x) ((x)&0x1F)
#define BIT(x) (1u << OFFSET(x))
#define INDEX(x) ((x) >> 5)
diff --git a/deps/v8/src/compiler/loop-peeling.cc b/deps/v8/src/compiler/loop-peeling.cc
index 5f8857c5df..ae5b0dfbac 100644
--- a/deps/v8/src/compiler/loop-peeling.cc
+++ b/deps/v8/src/compiler/loop-peeling.cc
@@ -4,6 +4,7 @@
#include "src/compiler/loop-peeling.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
@@ -107,7 +108,7 @@ struct Peeling {
// The vector which contains the mapped nodes.
NodeVector* pairs;
- Peeling(Graph* graph, Zone* tmp_zone, size_t max, NodeVector* p)
+ Peeling(Graph* graph, size_t max, NodeVector* p)
: node_map(graph, static_cast<uint32_t>(max)), pairs(p) {}
Node* map(Node* node) {
@@ -121,10 +122,13 @@ struct Peeling {
pairs->push_back(copy);
}
- void CopyNodes(Graph* graph, Zone* tmp_zone, Node* dead, NodeRange nodes) {
- NodeVector inputs(tmp_zone);
+ void CopyNodes(Graph* graph, Zone* tmp_zone_, Node* dead, NodeRange nodes,
+ SourcePositionTable* source_positions) {
+ NodeVector inputs(tmp_zone_);
// Copy all the nodes first.
for (Node* node : nodes) {
+ SourcePositionTable::Scope position(
+ source_positions, source_positions->GetSourcePosition(node));
inputs.clear();
for (Node* input : node->inputs()) {
inputs.push_back(map(input));
@@ -166,13 +170,13 @@ Node* PeeledIteration::map(Node* node) {
return node;
}
-bool LoopPeeler::CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop) {
+bool LoopPeeler::CanPeel(LoopTree::Loop* loop) {
// Look for returns and if projections that are outside the loop but whose
// control input is inside the loop.
- Node* loop_node = loop_tree->GetLoopControl(loop);
- for (Node* node : loop_tree->LoopNodes(loop)) {
+ Node* loop_node = loop_tree_->GetLoopControl(loop);
+ for (Node* node : loop_tree_->LoopNodes(loop)) {
for (Node* use : node->uses()) {
- if (!loop_tree->Contains(loop, use)) {
+ if (!loop_tree_->Contains(loop, use)) {
bool unmarked_exit;
switch (node->opcode()) {
case IrOpcode::kLoopExit:
@@ -187,7 +191,7 @@ bool LoopPeeler::CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop) {
}
if (unmarked_exit) {
if (FLAG_trace_turbo_loop) {
- Node* loop_node = loop_tree->GetLoopControl(loop);
+ Node* loop_node = loop_tree_->GetLoopControl(loop);
PrintF(
"Cannot peel loop %i. Loop exit without explicit mark: Node %i "
"(%s) is inside "
@@ -203,47 +207,45 @@ bool LoopPeeler::CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop) {
return true;
}
-
-PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
- LoopTree* loop_tree, LoopTree::Loop* loop,
- Zone* tmp_zone) {
- if (!CanPeel(loop_tree, loop)) return nullptr;
+PeeledIteration* LoopPeeler::Peel(LoopTree::Loop* loop) {
+ if (!CanPeel(loop)) return nullptr;
//============================================================================
// Construct the peeled iteration.
//============================================================================
- PeeledIterationImpl* iter = new (tmp_zone) PeeledIterationImpl(tmp_zone);
+ PeeledIterationImpl* iter = new (tmp_zone_) PeeledIterationImpl(tmp_zone_);
size_t estimated_peeled_size = 5 + (loop->TotalSize()) * 2;
- Peeling peeling(graph, tmp_zone, estimated_peeled_size, &iter->node_pairs_);
+ Peeling peeling(graph_, estimated_peeled_size, &iter->node_pairs_);
- Node* dead = graph->NewNode(common->Dead());
+ Node* dead = graph_->NewNode(common_->Dead());
// Map the loop header nodes to their entry values.
- for (Node* node : loop_tree->HeaderNodes(loop)) {
+ for (Node* node : loop_tree_->HeaderNodes(loop)) {
peeling.Insert(node, node->InputAt(kAssumedLoopEntryIndex));
}
// Copy all the nodes of loop body for the peeled iteration.
- peeling.CopyNodes(graph, tmp_zone, dead, loop_tree->BodyNodes(loop));
+ peeling.CopyNodes(graph_, tmp_zone_, dead, loop_tree_->BodyNodes(loop),
+ source_positions_);
//============================================================================
// Replace the entry to the loop with the output of the peeled iteration.
//============================================================================
- Node* loop_node = loop_tree->GetLoopControl(loop);
+ Node* loop_node = loop_tree_->GetLoopControl(loop);
Node* new_entry;
int backedges = loop_node->InputCount() - 1;
if (backedges > 1) {
// Multiple backedges from original loop, therefore multiple output edges
// from the peeled iteration.
- NodeVector inputs(tmp_zone);
+ NodeVector inputs(tmp_zone_);
for (int i = 1; i < loop_node->InputCount(); i++) {
inputs.push_back(peeling.map(loop_node->InputAt(i)));
}
Node* merge =
- graph->NewNode(common->Merge(backedges), backedges, &inputs[0]);
+ graph_->NewNode(common_->Merge(backedges), backedges, &inputs[0]);
// Merge values from the multiple output edges of the peeled iteration.
- for (Node* node : loop_tree->HeaderNodes(loop)) {
+ for (Node* node : loop_tree_->HeaderNodes(loop)) {
if (node->opcode() == IrOpcode::kLoop) continue; // already done.
inputs.clear();
for (int i = 0; i < backedges; i++) {
@@ -252,8 +254,8 @@ PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
for (Node* input : inputs) {
if (input != inputs[0]) { // Non-redundant phi.
inputs.push_back(merge);
- const Operator* op = common->ResizeMergeOrPhi(node->op(), backedges);
- Node* phi = graph->NewNode(op, backedges + 1, &inputs[0]);
+ const Operator* op = common_->ResizeMergeOrPhi(node->op(), backedges);
+ Node* phi = graph_->NewNode(op, backedges + 1, &inputs[0]);
node->ReplaceInput(0, phi);
break;
}
@@ -263,7 +265,7 @@ PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
} else {
// Only one backedge, simply replace the input to loop with output of
// peeling.
- for (Node* node : loop_tree->HeaderNodes(loop)) {
+ for (Node* node : loop_tree_->HeaderNodes(loop)) {
node->ReplaceInput(0, peeling.map(node->InputAt(1)));
}
new_entry = peeling.map(loop_node->InputAt(1));
@@ -273,23 +275,23 @@ PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
//============================================================================
// Change the exit and exit markers to merge/phi/effect-phi.
//============================================================================
- for (Node* exit : loop_tree->ExitNodes(loop)) {
+ for (Node* exit : loop_tree_->ExitNodes(loop)) {
switch (exit->opcode()) {
case IrOpcode::kLoopExit:
// Change the loop exit node to a merge node.
exit->ReplaceInput(1, peeling.map(exit->InputAt(0)));
- NodeProperties::ChangeOp(exit, common->Merge(2));
+ NodeProperties::ChangeOp(exit, common_->Merge(2));
break;
case IrOpcode::kLoopExitValue:
// Change exit marker to phi.
- exit->InsertInput(graph->zone(), 1, peeling.map(exit->InputAt(0)));
+ exit->InsertInput(graph_->zone(), 1, peeling.map(exit->InputAt(0)));
NodeProperties::ChangeOp(
- exit, common->Phi(MachineRepresentation::kTagged, 2));
+ exit, common_->Phi(MachineRepresentation::kTagged, 2));
break;
case IrOpcode::kLoopExitEffect:
// Change effect exit marker to effect phi.
- exit->InsertInput(graph->zone(), 1, peeling.map(exit->InputAt(0)));
- NodeProperties::ChangeOp(exit, common->EffectPhi(2));
+ exit->InsertInput(graph_->zone(), 1, peeling.map(exit->InputAt(0)));
+ NodeProperties::ChangeOp(exit, common_->EffectPhi(2));
break;
default:
break;
@@ -298,15 +300,11 @@ PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
return iter;
}
-namespace {
-
-void PeelInnerLoops(Graph* graph, CommonOperatorBuilder* common,
- LoopTree* loop_tree, LoopTree::Loop* loop,
- Zone* temp_zone) {
+void LoopPeeler::PeelInnerLoops(LoopTree::Loop* loop) {
// If the loop has nested loops, peel inside those.
if (!loop->children().empty()) {
for (LoopTree::Loop* inner_loop : loop->children()) {
- PeelInnerLoops(graph, common, loop_tree, inner_loop, temp_zone);
+ PeelInnerLoops(inner_loop);
}
return;
}
@@ -314,15 +312,17 @@ void PeelInnerLoops(Graph* graph, CommonOperatorBuilder* common,
if (loop->TotalSize() > LoopPeeler::kMaxPeeledNodes) return;
if (FLAG_trace_turbo_loop) {
PrintF("Peeling loop with header: ");
- for (Node* node : loop_tree->HeaderNodes(loop)) {
+ for (Node* node : loop_tree_->HeaderNodes(loop)) {
PrintF("%i ", node->id());
}
PrintF("\n");
}
- LoopPeeler::Peel(graph, common, loop_tree, loop, temp_zone);
+ Peel(loop);
}
+namespace {
+
void EliminateLoopExit(Node* node) {
DCHECK_EQ(IrOpcode::kLoopExit, node->opcode());
// The exit markers take the loop exit as input. We iterate over uses
@@ -347,21 +347,18 @@ void EliminateLoopExit(Node* node) {
} // namespace
-// static
-void LoopPeeler::PeelInnerLoopsOfTree(Graph* graph,
- CommonOperatorBuilder* common,
- LoopTree* loop_tree, Zone* temp_zone) {
- for (LoopTree::Loop* loop : loop_tree->outer_loops()) {
- PeelInnerLoops(graph, common, loop_tree, loop, temp_zone);
+void LoopPeeler::PeelInnerLoopsOfTree() {
+ for (LoopTree::Loop* loop : loop_tree_->outer_loops()) {
+ PeelInnerLoops(loop);
}
- EliminateLoopExits(graph, temp_zone);
+ EliminateLoopExits(graph_, tmp_zone_);
}
// static
-void LoopPeeler::EliminateLoopExits(Graph* graph, Zone* temp_zone) {
- ZoneQueue<Node*> queue(temp_zone);
- ZoneVector<bool> visited(graph->NodeCount(), false, temp_zone);
+void LoopPeeler::EliminateLoopExits(Graph* graph, Zone* tmp_zone) {
+ ZoneQueue<Node*> queue(tmp_zone);
+ ZoneVector<bool> visited(graph->NodeCount(), false, tmp_zone);
queue.push(graph->end());
while (!queue.empty()) {
Node* node = queue.front();
diff --git a/deps/v8/src/compiler/loop-peeling.h b/deps/v8/src/compiler/loop-peeling.h
index 301e4b8b6c..cd08900dcd 100644
--- a/deps/v8/src/compiler/loop-peeling.h
+++ b/deps/v8/src/compiler/loop-peeling.h
@@ -13,6 +13,8 @@ namespace v8 {
namespace internal {
namespace compiler {
+class SourcePositionTable;
+
// Represents the output of peeling a loop, which is basically the mapping
// from the body of the loop to the corresponding nodes in the peeled
// iteration.
@@ -31,15 +33,28 @@ class CommonOperatorBuilder;
// Implements loop peeling.
class V8_EXPORT_PRIVATE LoopPeeler {
public:
- static bool CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop);
- static PeeledIteration* Peel(Graph* graph, CommonOperatorBuilder* common,
- LoopTree* loop_tree, LoopTree::Loop* loop,
- Zone* tmp_zone);
- static void PeelInnerLoopsOfTree(Graph* graph, CommonOperatorBuilder* common,
- LoopTree* loop_tree, Zone* tmp_zone);
-
- static void EliminateLoopExits(Graph* graph, Zone* temp_zone);
+ LoopPeeler(Graph* graph, CommonOperatorBuilder* common, LoopTree* loop_tree,
+ Zone* tmp_zone, SourcePositionTable* source_positions)
+ : graph_(graph),
+ common_(common),
+ loop_tree_(loop_tree),
+ tmp_zone_(tmp_zone),
+ source_positions_(source_positions) {}
+ bool CanPeel(LoopTree::Loop* loop);
+ PeeledIteration* Peel(LoopTree::Loop* loop);
+ void PeelInnerLoopsOfTree();
+
+ static void EliminateLoopExits(Graph* graph, Zone* tmp_zone);
static const size_t kMaxPeeledNodes = 1000;
+
+ private:
+ Graph* const graph_;
+ CommonOperatorBuilder* const common_;
+ LoopTree* const loop_tree_;
+ Zone* const tmp_zone_;
+ SourcePositionTable* const source_positions_;
+
+ void PeelInnerLoops(LoopTree::Loop* loop);
};
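
Editor's note: a hedged usage sketch of the instance-based LoopPeeler declared above, as a pipeline phase might drive it; the graph, common, loop_tree, tmp_zone and source_positions objects are assumed to already exist in the caller:

    LoopPeeler peeler(graph, common, loop_tree, tmp_zone, source_positions);
    for (LoopTree::Loop* loop : loop_tree->outer_loops()) {
      if (!peeler.CanPeel(loop)) continue;
      PeeledIteration* iteration = peeler.Peel(loop);
      if (iteration != nullptr) {
        // iteration->map(n) returns the copy of body node n in the peeled
        // iteration (or n itself for nodes outside the loop).
      }
    }
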
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.cc b/deps/v8/src/compiler/loop-variable-optimizer.cc
index 069c86414c..1e93de5124 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.cc
+++ b/deps/v8/src/compiler/loop-variable-optimizer.cc
@@ -301,7 +301,8 @@ const InductionVariable* LoopVariableOptimizer::FindInductionVariable(
InductionVariable* LoopVariableOptimizer::TryGetInductionVariable(Node* phi) {
DCHECK_EQ(2, phi->op()->ValueInputCount());
- DCHECK_EQ(IrOpcode::kLoop, NodeProperties::GetControlInput(phi)->opcode());
+ Node* loop = NodeProperties::GetControlInput(phi);
+ DCHECK_EQ(IrOpcode::kLoop, loop->opcode());
Node* initial = phi->InputAt(0);
Node* arith = phi->InputAt(1);
InductionVariable::ArithmeticType arithmeticType;
@@ -318,17 +319,20 @@ InductionVariable* LoopVariableOptimizer::TryGetInductionVariable(Node* phi) {
}
// TODO(jarin) Support both sides.
- // XXX
- if (arith->InputAt(0) != phi) {
- if ((arith->InputAt(0)->opcode() != IrOpcode::kJSToNumber &&
- arith->InputAt(0)->opcode() != IrOpcode::kSpeculativeToNumber) ||
- arith->InputAt(0)->InputAt(0) != phi) {
- return nullptr;
+ if (arith->InputAt(0) != phi) return nullptr;
+
+ Node* effect_phi = nullptr;
+ for (Node* use : loop->uses()) {
+ if (use->opcode() == IrOpcode::kEffectPhi) {
+ DCHECK_NULL(effect_phi);
+ effect_phi = use;
}
}
+ if (!effect_phi) return nullptr;
+
Node* incr = arith->InputAt(1);
- return new (zone())
- InductionVariable(phi, arith, incr, initial, zone(), arithmeticType);
+ return new (zone()) InductionVariable(phi, effect_phi, arith, incr, initial,
+ zone(), arithmeticType);
}
void LoopVariableOptimizer::DetectInductionVariables(Node* loop) {
@@ -398,10 +402,14 @@ void LoopVariableOptimizer::ChangeToPhisAndInsertGuards() {
Type* backedge_type = NodeProperties::GetType(backedge_value);
Type* phi_type = NodeProperties::GetType(induction_var->phi());
if (!backedge_type->Is(phi_type)) {
- Node* backedge_control =
- NodeProperties::GetControlInput(induction_var->phi())->InputAt(1);
- Node* rename = graph()->NewNode(common()->TypeGuard(phi_type),
- backedge_value, backedge_control);
+ Node* loop = NodeProperties::GetControlInput(induction_var->phi());
+ Node* backedge_control = loop->InputAt(1);
+ Node* backedge_effect =
+ NodeProperties::GetEffectInput(induction_var->effect_phi(), 1);
+ Node* rename =
+ graph()->NewNode(common()->TypeGuard(phi_type), backedge_value,
+ backedge_effect, backedge_control);
+ induction_var->effect_phi()->ReplaceInput(1, rename);
induction_var->phi()->ReplaceInput(1, rename);
}
}
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.h b/deps/v8/src/compiler/loop-variable-optimizer.h
index 8054ec16c8..9eec614070 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.h
+++ b/deps/v8/src/compiler/loop-variable-optimizer.h
@@ -18,6 +18,7 @@ class Node;
class InductionVariable : public ZoneObject {
public:
Node* phi() const { return phi_; }
+ Node* effect_phi() const { return effect_phi_; }
Node* arith() const { return arith_; }
Node* increment() const { return increment_; }
Node* init_value() const { return init_value_; }
@@ -39,9 +40,10 @@ class InductionVariable : public ZoneObject {
private:
friend class LoopVariableOptimizer;
- InductionVariable(Node* phi, Node* arith, Node* increment, Node* init_value,
- Zone* zone, ArithmeticType arithmeticType)
+ InductionVariable(Node* phi, Node* effect_phi, Node* arith, Node* increment,
+ Node* init_value, Zone* zone, ArithmeticType arithmeticType)
: phi_(phi),
+ effect_phi_(effect_phi),
arith_(arith),
increment_(increment),
init_value_(init_value),
@@ -53,6 +55,7 @@ class InductionVariable : public ZoneObject {
void AddLowerBound(Node* bound, ConstraintKind kind);
Node* phi_;
+ Node* effect_phi_;
Node* arith_;
Node* increment_;
Node* init_value_;
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 8393a749bb..43f1518461 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -116,10 +116,6 @@ class MachineRepresentationInferrer {
representation_vector_[node->id()] = PromoteRepresentation(
LoadRepresentationOf(node->op()).representation());
break;
- case IrOpcode::kCheckedLoad:
- representation_vector_[node->id()] = PromoteRepresentation(
- CheckedLoadRepresentationOf(node->op()).representation());
- break;
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
@@ -165,10 +161,6 @@ class MachineRepresentationInferrer {
representation_vector_[node->id()] = PromoteRepresentation(
StoreRepresentationOf(node->op()).representation());
break;
- case IrOpcode::kCheckedStore:
- representation_vector_[node->id()] =
- PromoteRepresentation(CheckedStoreRepresentationOf(node->op()));
- break;
case IrOpcode::kUnalignedStore:
representation_vector_[node->id()] = PromoteRepresentation(
UnalignedStoreRepresentationOf(node->op()));
@@ -273,6 +265,11 @@ class MachineRepresentationInferrer {
MachineRepresentation::kFloat64;
}
break;
+ case IrOpcode::kI32x4ReplaceLane:
+ case IrOpcode::kI32x4Splat:
+ representation_vector_[node->id()] =
+ MachineRepresentation::kSimd128;
+ break;
#undef LABEL
default:
break;
@@ -377,6 +374,14 @@ class MachineRepresentationChecker {
CheckValueInputRepresentationIs(node, 0,
MachineRepresentation::kSimd128);
break;
+ case IrOpcode::kI32x4ReplaceLane:
+ CheckValueInputRepresentationIs(node, 0,
+ MachineRepresentation::kSimd128);
+ CheckValueInputForInt32Op(node, 1);
+ break;
+ case IrOpcode::kI32x4Splat:
+ CheckValueInputForInt32Op(node, 0);
+ break;
#define LABEL(opcode) case IrOpcode::k##opcode:
case IrOpcode::kChangeInt32ToTagged:
case IrOpcode::kChangeUint32ToTagged:
@@ -562,7 +567,7 @@ class MachineRepresentationChecker {
str << "Node #" << node->id() << ":" << *node->op()
<< " in the machine graph is not being checked.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
break;
}
@@ -592,7 +597,7 @@ class MachineRepresentationChecker {
<< input_representation << " which doesn't have a " << representation
<< " representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
@@ -611,7 +616,7 @@ class MachineRepresentationChecker {
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a tagged representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
void CheckValueInputIsTaggedOrPointer(Node const* node, int index) {
@@ -644,7 +649,7 @@ class MachineRepresentationChecker {
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a tagged or pointer representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
@@ -661,7 +666,7 @@ class MachineRepresentationChecker {
str << "TypeError: node #" << input->id() << ":" << *input->op()
<< " is untyped.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
break;
}
default:
@@ -672,7 +677,7 @@ class MachineRepresentationChecker {
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have an int32-compatible representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
void CheckValueInputForInt64Op(Node const* node, int index) {
@@ -687,7 +692,7 @@ class MachineRepresentationChecker {
str << "TypeError: node #" << input->id() << ":" << *input->op()
<< " is untyped.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
break;
}
@@ -700,7 +705,7 @@ class MachineRepresentationChecker {
<< input_representation
<< " which doesn't have a kWord64 representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
void CheckValueInputForFloat32Op(Node const* node, int index) {
@@ -714,7 +719,7 @@ class MachineRepresentationChecker {
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a kFloat32 representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
void CheckValueInputForFloat64Op(Node const* node, int index) {
@@ -728,7 +733,7 @@ class MachineRepresentationChecker {
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a kFloat64 representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
void CheckCallInputs(Node const* node) {
@@ -755,7 +760,7 @@ class MachineRepresentationChecker {
}
if (should_log_error) {
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index e589f0cbd8..97c83b1b82 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -293,7 +293,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
// (x >> K) < C => x < (C << K)
// when C < (M >> K)
const uint32_t c = m.right().Value();
- const uint32_t k = mleft.right().Value() & 0x1f;
+ const uint32_t k = mleft.right().Value() & 0x1F;
if (c < static_cast<uint32_t>(kMaxInt >> k)) {
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(1, Uint32Constant(c << k));
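
Editor's note: the hunk above only normalizes the hex constant, but the rule it sits in is worth spelling out: for unsigned values, (x >> K) < C holds exactly when x < (C << K), provided C << K does not overflow (the reducer's C < (kMaxInt >> K) guard). A standalone check of that equivalence:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t k = 3, c = 7;  // c < (kMaxInt >> k), so c << k is exact.
      for (uint32_t x = 0; x < 1000; ++x) {
        assert(((x >> k) < c) == (x < (c << k)));
      }
      return 0;
    }
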
@@ -684,7 +684,6 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReduceFloat64InsertHighWord32(node);
case IrOpcode::kStore:
case IrOpcode::kUnalignedStore:
- case IrOpcode::kCheckedStore:
return ReduceStore(node);
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
@@ -923,10 +922,7 @@ Reduction MachineOperatorReducer::ReduceStore(Node* node) {
NodeMatcher nm(node);
MachineRepresentation rep;
int value_input;
- if (nm.IsCheckedStore()) {
- rep = CheckedStoreRepresentationOf(node->op());
- value_input = 3;
- } else if (nm.IsStore()) {
+ if (nm.IsStore()) {
rep = StoreRepresentationOf(node->op()).representation();
value_input = 2;
} else {
@@ -941,9 +937,9 @@ Reduction MachineOperatorReducer::ReduceStore(Node* node) {
case IrOpcode::kWord32And: {
Uint32BinopMatcher m(value);
if (m.right().HasValue() && ((rep == MachineRepresentation::kWord8 &&
- (m.right().Value() & 0xff) == 0xff) ||
+ (m.right().Value() & 0xFF) == 0xFF) ||
(rep == MachineRepresentation::kWord16 &&
- (m.right().Value() & 0xffff) == 0xffff))) {
+ (m.right().Value() & 0xFFFF) == 0xFFFF))) {
node->ReplaceInput(value_input, m.left().node());
return Changed(node);
}
@@ -1029,12 +1025,12 @@ Reduction MachineOperatorReducer::ReduceWord32Shifts(Node* node) {
(node->opcode() == IrOpcode::kWord32Shr) ||
(node->opcode() == IrOpcode::kWord32Sar));
if (machine()->Word32ShiftIsSafe()) {
- // Remove the explicit 'and' with 0x1f if the shift provided by the machine
+ // Remove the explicit 'and' with 0x1F if the shift provided by the machine
// instruction matches that required by JavaScript.
Int32BinopMatcher m(node);
if (m.right().IsWord32And()) {
Int32BinopMatcher mright(m.right().node());
- if (mright.right().Is(0x1f)) {
+ if (mright.right().Is(0x1F)) {
node->ReplaceInput(1, mright.left().node());
return Changed(node);
}
@@ -1088,7 +1084,7 @@ Reduction MachineOperatorReducer::ReduceWord32Shr(Node* node) {
if (m.left().IsWord32And() && m.right().HasValue()) {
Uint32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
- uint32_t shift = m.right().Value() & 0x1f;
+ uint32_t shift = m.right().Value() & 0x1F;
uint32_t mask = mleft.right().Value();
if ((mask >> shift) == 0) {
// (m >>> s) == 0 implies ((x & m) >>> s) == 0
@@ -1180,7 +1176,7 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
if (m.left().IsWord32Shl()) {
Uint32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() &&
- (mleft.right().Value() & 0x1f) >=
+ (mleft.right().Value() & 0x1F) >=
base::bits::CountTrailingZeros(mask)) {
// (x << L) & (-1 << K) => x << L iff L >= K
return Replace(mleft.node());
@@ -1344,7 +1340,7 @@ Reduction MachineOperatorReducer::ReduceFloat64InsertLowWord32(Node* node) {
Uint32Matcher mrhs(node->InputAt(1));
if (mlhs.HasValue() && mrhs.HasValue()) {
return ReplaceFloat64(bit_cast<double>(
- (bit_cast<uint64_t>(mlhs.Value()) & V8_UINT64_C(0xFFFFFFFF00000000)) |
+ (bit_cast<uint64_t>(mlhs.Value()) & uint64_t{0xFFFFFFFF00000000}) |
mrhs.Value()));
}
return NoChange();
@@ -1357,7 +1353,7 @@ Reduction MachineOperatorReducer::ReduceFloat64InsertHighWord32(Node* node) {
Uint32Matcher mrhs(node->InputAt(1));
if (mlhs.HasValue() && mrhs.HasValue()) {
return ReplaceFloat64(bit_cast<double>(
- (bit_cast<uint64_t>(mlhs.Value()) & V8_UINT64_C(0xFFFFFFFF)) |
+ (bit_cast<uint64_t>(mlhs.Value()) & uint64_t{0xFFFFFFFF}) |
(static_cast<uint64_t>(mrhs.Value()) << 32)));
}
return NoChange();
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 2603b1d18e..66178308be 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -59,17 +59,6 @@ UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
return OpParameter<UnalignedStoreRepresentation>(op);
}
-CheckedLoadRepresentation CheckedLoadRepresentationOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kCheckedLoad, op->opcode());
- return OpParameter<CheckedLoadRepresentation>(op);
-}
-
-
-CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kCheckedStore, op->opcode());
- return OpParameter<CheckedStoreRepresentation>(op);
-}
-
bool operator==(StackSlotRepresentation lhs, StackSlotRepresentation rhs) {
return lhs.size() == rhs.size() && lhs.alignment() == rhs.alignment();
}
@@ -149,7 +138,6 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
PURE_BINARY_OP_LIST_64(V) \
V(Word32Clz, Operator::kNoProperties, 1, 0, 1) \
V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastTaggedToWord, Operator::kNoProperties, 1, 0, 1) \
V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
@@ -474,14 +462,6 @@ struct MachineOperatorGlobalCache {
Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
"UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
- struct CheckedLoad##Type##Operator final \
- : public Operator1<CheckedLoadRepresentation> { \
- CheckedLoad##Type##Operator() \
- : Operator1<CheckedLoadRepresentation>( \
- IrOpcode::kCheckedLoad, \
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
- "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
struct ProtectedLoad##Type##Operator final \
: public Operator1<LoadRepresentation> { \
ProtectedLoad##Type##Operator() \
@@ -492,7 +472,6 @@ struct MachineOperatorGlobalCache {
}; \
Load##Type##Operator kLoad##Type; \
UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
- CheckedLoad##Type##Operator kCheckedLoad##Type; \
ProtectedLoad##Type##Operator kProtectedLoad##Type;
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
@@ -547,15 +526,6 @@ struct MachineOperatorGlobalCache {
"UnalignedStore", 3, 1, 1, 0, 1, 0, \
MachineRepresentation::Type) {} \
}; \
- struct CheckedStore##Type##Operator final \
- : public Operator1<CheckedStoreRepresentation> { \
- CheckedStore##Type##Operator() \
- : Operator1<CheckedStoreRepresentation>( \
- IrOpcode::kCheckedStore, \
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
- "CheckedStore", 4, 1, 1, 0, 1, 0, MachineRepresentation::Type) { \
- } \
- }; \
struct ProtectedStore##Type##Operator \
: public Operator1<StoreRepresentation> { \
explicit ProtectedStore##Type##Operator() \
@@ -572,7 +542,6 @@ struct MachineOperatorGlobalCache {
kStore##Type##PointerWriteBarrier; \
Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier; \
UnalignedStore##Type##Operator kUnalignedStore##Type; \
- CheckedStore##Type##Operator kCheckedStore##Type; \
ProtectedStore##Type##Operator kProtectedStore##Type;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
@@ -644,10 +613,25 @@ struct MachineOperatorGlobalCache {
BitcastWordToTaggedOperator()
: Operator(IrOpcode::kBitcastWordToTagged,
Operator::kEliminatable | Operator::kNoWrite,
- "BitcastWordToTagged", 1, 0, 0, 1, 0, 0) {}
+ "BitcastWordToTagged", 1, 1, 1, 1, 1, 0) {}
};
BitcastWordToTaggedOperator kBitcastWordToTagged;
+ struct BitcastTaggedToWordOperator : public Operator {
+ BitcastTaggedToWordOperator()
+ : Operator(IrOpcode::kBitcastTaggedToWord,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "BitcastTaggedToWord", 1, 1, 1, 1, 1, 0) {}
+ };
+ BitcastTaggedToWordOperator kBitcastTaggedToWord;
+
+ struct SpeculationFenceOperator : public Operator {
+ SpeculationFenceOperator()
+ : Operator(IrOpcode::kSpeculationFence, Operator::kNoThrow,
+ "SpeculationFence", 0, 1, 1, 0, 1, 0) {}
+ };
+ SpeculationFenceOperator kSpeculationFence;
+
struct DebugAbortOperator : public Operator {
DebugAbortOperator()
: Operator(IrOpcode::kDebugAbort, Operator::kNoThrow, "DebugAbort", 1,
@@ -823,6 +807,10 @@ const Operator* MachineOperatorBuilder::BitcastWordToTagged() {
return &cache_.kBitcastWordToTagged;
}
+const Operator* MachineOperatorBuilder::BitcastTaggedToWord() {
+ return &cache_.kBitcastTaggedToWord;
+}
+
const Operator* MachineOperatorBuilder::DebugAbort() {
return &cache_.kDebugAbort;
}
@@ -835,33 +823,6 @@ const Operator* MachineOperatorBuilder::Comment(const char* msg) {
return new (zone_) CommentOperator(msg);
}
-const Operator* MachineOperatorBuilder::CheckedLoad(
- CheckedLoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kCheckedLoad##Type; \
- }
- MACHINE_TYPE_LIST(LOAD)
-#undef LOAD
- UNREACHABLE();
-}
-
-
-const Operator* MachineOperatorBuilder::CheckedStore(
- CheckedStoreRepresentation rep) {
- switch (rep) {
-#define STORE(kRep) \
- case MachineRepresentation::kRep: \
- return &cache_.kCheckedStore##kRep;
- MACHINE_REPRESENTATION_LIST(STORE)
-#undef STORE
- case MachineRepresentation::kBit:
- case MachineRepresentation::kNone:
- break;
- }
- UNREACHABLE();
-}
-
const Operator* MachineOperatorBuilder::AtomicLoad(LoadRepresentation rep) {
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
@@ -952,6 +913,11 @@ const Operator* MachineOperatorBuilder::AtomicXor(MachineType rep) {
UNREACHABLE();
}
+const OptionalOperator MachineOperatorBuilder::SpeculationFence() {
+ return OptionalOperator(flags_ & kSpeculationFence,
+ &cache_.kSpeculationFence);
+}
+
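SpeculationFence is exposed through the flag-gated OptionalOperator mechanism: the cached operator always exists, but only backends that set kSpeculationFence report it as supported. A rough illustration of that pattern with stand-in types (not the actual V8 classes):

    #include <cassert>

    struct Op { const char* name; };

    class OptionalOp {
     public:
      OptionalOp(bool supported, const Op* op) : supported_(supported), op_(op) {}
      bool IsSupported() const { return supported_; }
      const Op* op() const { assert(supported_); return op_; }
     private:
      bool supported_;
      const Op* op_;
    };

    int main() {
      static const Op fence{"SpeculationFence"};
      const unsigned kSpeculationFence = 1u << 22;
      unsigned flags = kSpeculationFence;  // backend opts in to the fence
      OptionalOp maybe_fence((flags & kSpeculationFence) != 0, &fence);
      if (maybe_fence.IsSupported()) (void)maybe_fence.op();
      return 0;
    }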
#define SIMD_LANE_OPS(Type, lane_count) \
const Operator* MachineOperatorBuilder::Type##ExtractLane( \
int32_t lane_index) { \
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 3b6634c8bc..10b4b15701 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -83,17 +83,6 @@ typedef MachineRepresentation UnalignedStoreRepresentation;
UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
Operator const*);
-// A CheckedLoad needs a MachineType.
-typedef MachineType CheckedLoadRepresentation;
-
-CheckedLoadRepresentation CheckedLoadRepresentationOf(Operator const*);
-
-
-// A CheckedStore needs a MachineType.
-typedef MachineRepresentation CheckedStoreRepresentation;
-
-CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
-
class StackSlotRepresentation final {
public:
StackSlotRepresentation(int size, int alignment)
@@ -154,13 +143,15 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
kWord64ReverseBytes = 1u << 19,
kInt32AbsWithOverflow = 1u << 20,
kInt64AbsWithOverflow = 1u << 21,
+ kSpeculationFence = 1u << 22,
kAllOptionalOps =
kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
kFloat64RoundUp | kFloat32RoundTruncate | kFloat64RoundTruncate |
kFloat64RoundTiesAway | kFloat32RoundTiesEven | kFloat64RoundTiesEven |
kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
kWord32ReverseBits | kWord64ReverseBits | kWord32ReverseBytes |
- kWord64ReverseBytes | kInt32AbsWithOverflow | kInt64AbsWithOverflow
+ kWord64ReverseBytes | kInt32AbsWithOverflow | kInt64AbsWithOverflow |
+ kSpeculationFence
};
typedef base::Flags<Flag, unsigned> Flags;
@@ -606,11 +597,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* LoadFramePointer();
const Operator* LoadParentFramePointer();
- // checked-load heap, index, length
- const Operator* CheckedLoad(CheckedLoadRepresentation);
- // checked-store heap, index, length, value
- const Operator* CheckedStore(CheckedStoreRepresentation);
-
// atomic-load [base + index]
const Operator* AtomicLoad(LoadRepresentation rep);
// atomic-store [base + index], value
@@ -630,6 +616,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// atomic-xor [base + index], value
const Operator* AtomicXor(MachineType rep);
+ const OptionalOperator SpeculationFence();
+
// Target machine word-size assumed by this builder.
bool Is32() const { return word() == MachineRepresentation::kWord32; }
bool Is64() const { return word() == MachineRepresentation::kWord64; }
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 767ada506a..596204e214 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -92,8 +92,6 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
return VisitStoreElement(node, state);
case IrOpcode::kStoreField:
return VisitStoreField(node, state);
- case IrOpcode::kCheckedLoad:
- case IrOpcode::kCheckedStore:
case IrOpcode::kDeoptimizeIf:
case IrOpcode::kDeoptimizeUnless:
case IrOpcode::kIfException:
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index b7301749cf..3b57081c9e 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -143,46 +143,6 @@ static inline bool HasRegisterInput(Instruction* instr, size_t index) {
namespace {
-class OutOfLineLoadSingle final : public OutOfLineCode {
- public:
- OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ Move(result_, std::numeric_limits<float>::quiet_NaN());
- }
-
- private:
- FloatRegister const result_;
-};
-
-
-class OutOfLineLoadDouble final : public OutOfLineCode {
- public:
- OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ Move(result_, std::numeric_limits<double>::quiet_NaN());
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineLoadInteger final : public OutOfLineCode {
- public:
- OutOfLineLoadInteger(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ mov(result_, zero_reg); }
-
- private:
- Register const result_;
-};
-
-
class OutOfLineRound : public OutOfLineCode {
public:
OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
@@ -391,82 +351,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
} // namespace
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
- do { \
- auto result = i.Output##width##Register(); \
- auto ool = new (zone()) OutOfLineLoad##width(this, result); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ Addu(kScratchReg, i.InputRegister(2), offset); \
- __ asm_instr(result, MemOperand(kScratchReg, 0)); \
- } else { \
- auto offset = i.InputOperand(0).immediate(); \
- __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
- } \
- __ bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ Addu(kScratchReg, i.InputRegister(2), offset); \
- __ asm_instr(result, MemOperand(kScratchReg, 0)); \
- } else { \
- auto offset = i.InputOperand(0).immediate(); \
- __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
- } \
- __ bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr) \
- do { \
- Label done; \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- auto value = i.InputOrZero##width##Register(2); \
- if (value == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { \
- __ Move(kDoubleRegZero, 0.0); \
- } \
- __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ Addu(kScratchReg, i.InputRegister(3), offset); \
- __ asm_instr(value, MemOperand(kScratchReg, 0)); \
- } else { \
- auto offset = i.InputOperand(0).immediate(); \
- auto value = i.InputOrZero##width##Register(2); \
- if (value == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { \
- __ Move(kDoubleRegZero, 0.0); \
- } \
- __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
- } \
- __ bind(&done); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- Label done; \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- auto value = i.InputOrZeroRegister(2); \
- __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ Addu(kScratchReg, i.InputRegister(3), offset); \
- __ asm_instr(value, MemOperand(kScratchReg, 0)); \
- } else { \
- auto offset = i.InputOperand(0).immediate(); \
- auto value = i.InputOrZeroRegister(2); \
- __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
- } \
- __ bind(&done); \
- } while (0)
-
#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
if (IsMipsArchVariant(kMips32r6)) { \
__ cfc1(kScratchReg, FCSR); \
@@ -787,7 +671,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Call(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
: RelocInfo::JS_TO_WASM_CALL);
} else {
- __ Call(at, i.InputRegister(0), 0);
+ __ Call(i.InputRegister(0));
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -816,7 +700,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Jump(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
: RelocInfo::JS_TO_WASM_CALL);
} else {
- __ Jump(at, i.InputRegister(0), 0);
+ __ Jump(i.InputRegister(0));
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -834,7 +718,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
- __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
+ __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
+ Operand(kScratchReg));
}
__ lw(at, FieldMemOperand(func, JSFunction::kCodeOffset));
__ Call(at, Code::kHeaderSize - kHeapObjectTag);
@@ -998,7 +883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (FLAG_debug_code && alignment > 0) {
// Verify that the output_register is properly aligned
__ And(kScratchReg, i.OutputRegister(), Operand(kPointerSize - 1));
- __ Assert(eq, kAllocationIsNotDoubleAligned, kScratchReg,
+ __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg,
Operand(zero_reg));
}
@@ -1203,7 +1088,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register dst = i.OutputRegister();
uint32_t B0 = 0x55555555; // (T)~(T)0/3
uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
- uint32_t B2 = 0x0f0f0f0f; // (T)~(T)0/255*15
+ uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
uint32_t value = 0x01010101; // (T)~(T)0/255
uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
__ srl(kScratchReg, src, 1);
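The constants B0, B1, B2 and the final multiply-and-shift correspond to the classic SWAR population count underlying this instruction sequence. The same algorithm in plain C++, to make the steps explicit:

    #include <cassert>
    #include <cstdint>

    uint32_t PopCount32(uint32_t x) {
      x = x - ((x >> 1) & 0x55555555);                 // B0: 2-bit partial sums
      x = (x & 0x33333333) + ((x >> 2) & 0x33333333);  // B1: 4-bit partial sums
      x = (x + (x >> 4)) & 0x0F0F0F0F;                 // B2: 8-bit partial sums
      return (x * 0x01010101) >> 24;                   // add up the four bytes
    }

    int main() {
      assert(PopCount32(0x00000000) == 0);
      assert(PopCount32(0x0F0F0F0F) == 16);
      assert(PopCount32(0xFFFFFFFF) == 32);
      return 0;
    }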
@@ -1742,14 +1627,45 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMipsPush:
if (instr->InputAt(0)->IsFPRegister()) {
- __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
- __ Subu(sp, sp, Operand(kDoubleSize));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+ switch (op->representation()) {
+ case MachineRepresentation::kFloat32:
+ __ swc1(i.InputFloatRegister(0), MemOperand(sp, -kFloatSize));
+ __ Subu(sp, sp, Operand(kFloatSize));
+ frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
+ break;
+ case MachineRepresentation::kFloat64:
+ __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ __ Subu(sp, sp, Operand(kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ break;
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
} else {
__ Push(i.InputRegister(0));
frame_access_state()->IncreaseSPDelta(1);
}
break;
+ case kMipsPeek: {
+ int reverse_slot = MiscField::decode(instr->opcode());
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ Ldc1(i.OutputDoubleRegister(), MemOperand(fp, offset));
+ } else {
+ DCHECK_EQ(op->representation(), MachineRepresentation::kFloat32);
+ __ lwc1(i.OutputSingleRegister(0), MemOperand(fp, offset));
+ }
+ } else {
+ __ lw(i.OutputRegister(0), MemOperand(fp, offset));
+ }
+ break;
+ }
case kMipsStackClaim: {
__ Subu(sp, sp, Operand(i.InputInt32(0)));
frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
@@ -1773,46 +1689,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
break;
}
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Double, Ldc1);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(sb);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(sh);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(sw);
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(Double, Sdc1);
- break;
- case kCheckedLoadWord64:
- case kCheckedStoreWord64:
- UNREACHABLE(); // currently unsupported checked int64 load/store.
- break;
case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
break;
@@ -2593,7 +2469,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (src0 == src1) {
// Unary S32x4 shuffles are handled with shf.w instruction
- unsigned lane = shuffle & 0xff;
+ unsigned lane = shuffle & 0xFF;
if (FLAG_debug_code) {
// range of all four lanes, for unary instruction,
// should belong to the same range, which can be one of these:
@@ -2601,7 +2477,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (lane >= 4) {
int32_t shuffle_helper = shuffle;
for (int i = 0; i < 4; ++i) {
- lane = shuffle_helper & 0xff;
+ lane = shuffle_helper & 0xFF;
CHECK_GE(lane, 4);
shuffle_helper >>= 8;
}
@@ -2609,7 +2485,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
uint32_t i8 = 0;
for (int i = 0; i < 4; i++) {
- lane = shuffle & 0xff;
+ lane = shuffle & 0xFF;
if (lane >= 4) {
lane -= 4;
}
@@ -3163,7 +3039,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
}
}
@@ -3438,7 +3314,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
@@ -3451,10 +3327,12 @@ void CodeGenerator::AssembleConstructFrame() {
const RegList saves = descriptor->CalleeSavedRegisters();
const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ const int returns = frame()->GetReturnSlotCount();
- // Skip callee-saved slots, which are pushed below.
+ // Skip callee-saved and return slots, which are pushed below.
shrink_slots -= base::bits::CountPopulation(saves);
shrink_slots -= 2 * base::bits::CountPopulation(saves_fpu);
+ shrink_slots -= returns;
if (shrink_slots > 0) {
__ Subu(sp, sp, Operand(shrink_slots * kPointerSize));
}
@@ -3469,12 +3347,22 @@ void CodeGenerator::AssembleConstructFrame() {
__ MultiPush(saves);
DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation(saves) + 1);
}
+
+ if (returns != 0) {
+ // Create space for returns.
+ __ Subu(sp, sp, Operand(returns * kPointerSize));
+ }
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ __ Addu(sp, sp, Operand(returns * kPointerSize));
+ }
+
// Restore GP registers.
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/mips/instruction-codes-mips.h
index 3a2a873e48..dd789d0196 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/mips/instruction-codes-mips.h
@@ -128,6 +128,7 @@ namespace compiler {
V(MipsFloat32Min) \
V(MipsFloat64Min) \
V(MipsPush) \
+ V(MipsPeek) \
V(MipsStoreToStackSlot) \
V(MipsByteSwap32) \
V(MipsStackClaim) \
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index 1053763f0d..35b8a2396d 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -36,7 +36,7 @@ class MipsOperandGenerator final : public OperandGenerator {
InstructionOperand UseRegisterOrImmediateZero(Node* node) {
if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
(IsFloatConstant(node) &&
- (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
+ (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
return UseImmediate(node);
}
return UseRegister(node);
@@ -92,18 +92,6 @@ class MipsOperandGenerator final : public OperandGenerator {
case kMipsSwc1:
case kMipsLdc1:
case kMipsSdc1:
- case kCheckedLoadInt8:
- case kCheckedLoadUint8:
- case kCheckedLoadInt16:
- case kCheckedLoadUint16:
- case kCheckedLoadWord32:
- case kCheckedStoreWord8:
- case kCheckedStoreWord16:
- case kCheckedStoreWord32:
- case kCheckedLoadFloat32:
- case kCheckedLoadFloat64:
- case kCheckedStoreFloat32:
- case kCheckedStoreFloat64:
// true even for 32b values, offsets > 16b
// are handled in assembler-mips.cc
return is_int32(value);
@@ -233,7 +221,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -432,7 +421,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
// Any shift value can match; int32 shifts use `value % 32`.
- uint32_t lsb = mleft.right().Value() & 0x1f;
+ uint32_t lsb = mleft.right().Value() & 0x1F;
// Ext cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -531,7 +520,7 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x1f;
+ uint32_t lsb = m.right().Value() & 0x1F;
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ext for Shr(And(x, mask), imm) where the result of the mask is
@@ -1181,8 +1170,8 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
int slot = kCArgSlotCount;
for (PushParameter input : (*arguments)) {
- if (input.node()) {
- Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ if (input.node) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(slot << kPointerSizeLog2));
++slot;
}
@@ -1191,19 +1180,53 @@ void InstructionSelector::EmitPrepareArguments(
// Possibly align stack here for functions.
int push_count = static_cast<int>(descriptor->StackParameterCount());
if (push_count > 0) {
+ // Calculate needed space
+ int stack_size = 0;
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node) {
+ stack_size += input.location.GetSizeInPointers();
+ }
+ }
Emit(kMipsStackClaim, g.NoOutput(),
- g.TempImmediate(push_count << kPointerSizeLog2));
+ g.TempImmediate(stack_size << kPointerSizeLog2));
}
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
- if (input.node()) {
- Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ if (input.node) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(n << kPointerSizeLog2));
}
}
}
}
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ MipsOperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ ++reverse_slot;
+ // Skip any alignment holes in nodes.
+ if (output.node != nullptr) {
+ DCHECK(!descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ InstructionOperand result = g.DefineAsRegister(output.node);
+ Emit(kMipsPeek | MiscField::encode(reverse_slot), result);
+ }
+ if (output.location.GetType() == MachineType::Float64()) {
+      // Float64 values require an implicit second slot.
+ ++reverse_slot;
+ }
+ }
+}
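EmitPrepareResults walks the caller-frame result slots in reverse; a Float64 result spans two pointer-sized slots on this 32-bit target, which is why the counter advances by an extra slot for that type. A tiny illustration of the slot arithmetic, assuming a pointer size of 4 as on MIPS32:

    #include <cassert>

    int SizeInPointers(int value_bytes, int pointer_bytes) {
      return (value_bytes + pointer_bytes - 1) / pointer_bytes;
    }

    int main() {
      const int kPointerSize = 4;                     // 32-bit target
      assert(SizeInPointers(8, kPointerSize) == 2);   // Float64: two slots
      assert(SizeInPointers(4, kPointerSize) == 1);   // Word32/Float32: one slot
      return 0;
    }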
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
@@ -1312,99 +1335,6 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
}
}
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- MipsOperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
-
- InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
- ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
-
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), offset_operand, length_operand,
- g.UseRegister(buffer));
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- MipsOperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- default:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
-
- InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
- ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
-
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- offset_operand, length_operand, g.UseRegisterOrImmediateZero(value),
- g.UseRegister(buffer));
-}
-
-
namespace {
// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
@@ -1417,7 +1347,8 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1630,7 +1561,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
g.TempImmediate(0), cont->kind(), cont->reason(),
- cont->frame_state());
+ cont->feedback(), cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
g.TempImmediate(0));
@@ -1652,14 +1583,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2057,6 +1988,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
+void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+
#define SIMD_TYPE_LIST(V) \
V(F32x4) \
V(I32x4) \
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index 6d43750b1c..d4463008c8 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -143,46 +143,6 @@ static inline bool HasRegisterInput(Instruction* instr, size_t index) {
namespace {
-class OutOfLineLoadSingle final : public OutOfLineCode {
- public:
- OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ Move(result_, std::numeric_limits<float>::quiet_NaN());
- }
-
- private:
- FloatRegister const result_;
-};
-
-
-class OutOfLineLoadDouble final : public OutOfLineCode {
- public:
- OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ Move(result_, std::numeric_limits<double>::quiet_NaN());
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineLoadInteger final : public OutOfLineCode {
- public:
- OutOfLineLoadInteger(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ mov(result_, zero_reg); }
-
- private:
- Register const result_;
-};
-
-
class OutOfLineRound : public OutOfLineCode {
public:
OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
@@ -403,109 +363,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
}
} // namespace
-#define ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, length, out_of_bounds) \
- do { \
- if (!length.is_reg() && base::bits::IsPowerOfTwo(length.immediate())) { \
- __ And(kScratchReg, offset, Operand(~(length.immediate() - 1))); \
- __ Branch(USE_DELAY_SLOT, out_of_bounds, ne, kScratchReg, \
- Operand(zero_reg)); \
- } else { \
- __ Branch(USE_DELAY_SLOT, out_of_bounds, hs, offset, length); \
- } \
- } while (0)
-
-#define ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, length, out_of_bounds) \
- do { \
- if (!length.is_reg() && base::bits::IsPowerOfTwo(length.immediate())) { \
- __ Or(kScratchReg, zero_reg, Operand(offset)); \
- __ And(kScratchReg, kScratchReg, Operand(~(length.immediate() - 1))); \
- __ Branch(out_of_bounds, ne, kScratchReg, Operand(zero_reg)); \
- } else { \
- __ Branch(out_of_bounds, ls, length.rm(), Operand(offset)); \
- } \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
- do { \
- auto result = i.Output##width##Register(); \
- auto ool = new (zone()) OutOfLineLoad##width(this, result); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), ool->entry()); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
- __ asm_instr(result, MemOperand(kScratchReg, 0)); \
- } else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), \
- ool->entry()); \
- __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
- } \
- __ bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), ool->entry()); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
- __ asm_instr(result, MemOperand(kScratchReg, 0)); \
- } else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), \
- ool->entry()); \
- __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
- } \
- __ bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr) \
- do { \
- Label done; \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- auto value = i.InputOrZero##width##Register(2); \
- if (value == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { \
- __ Move(kDoubleRegZero, 0.0); \
- } \
- ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), &done); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
- __ asm_instr(value, MemOperand(kScratchReg, 0)); \
- } else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- auto value = i.InputOrZero##width##Register(2); \
- if (value == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { \
- __ Move(kDoubleRegZero, 0.0); \
- } \
- ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), &done); \
- __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
- } \
- __ bind(&done); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- Label done; \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- auto value = i.InputOrZeroRegister(2); \
- ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), &done); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
- __ asm_instr(value, MemOperand(kScratchReg, 0)); \
- } else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- auto value = i.InputOrZeroRegister(2); \
- ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), &done); \
- __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
- } \
- __ bind(&done); \
- } while (0)
#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
if (kArchVariant == kMips64r6) { \
@@ -833,14 +690,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
Address wasm_code = reinterpret_cast<Address>(
i.ToConstant(instr->InputAt(0)).ToInt64());
- __ Jump(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
+ __ Call(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
: RelocInfo::JS_TO_WASM_CALL);
} else {
__ daddiu(at, i.InputRegister(0), 0);
- __ Jump(at);
+ __ Call(at);
}
+ RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
- frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchTailCallCodeObjectFromJSFunction:
@@ -886,7 +743,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ Ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
- __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
+ __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
+ Operand(kScratchReg));
}
__ Ld(at, FieldMemOperand(func, JSFunction::kCodeOffset));
__ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -1050,7 +908,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (FLAG_debug_code && alignment > 0) {
// Verify that the output_register is properly aligned
__ And(kScratchReg, i.OutputRegister(), Operand(kPointerSize - 1));
- __ Assert(eq, kAllocationIsNotDoubleAligned, kScratchReg,
+ __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg,
Operand(zero_reg));
}
if (alignment == 2 * kPointerSize) {
@@ -1369,7 +1227,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register dst = i.OutputRegister();
uint32_t B0 = 0x55555555; // (T)~(T)0/3
uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
- uint32_t B2 = 0x0f0f0f0f; // (T)~(T)0/255*15
+ uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
uint32_t value = 0x01010101; // (T)~(T)0/255
uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
__ srl(kScratchReg, src, 1);
@@ -1394,7 +1252,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register dst = i.OutputRegister();
uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
- uint64_t B2 = 0x0f0f0f0f0f0f0f0fl; // (T)~(T)0/255*15
+ uint64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
uint64_t value = 0x0101010101010101l; // (T)~(T)0/255
uint64_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
__ dsrl(kScratchReg, src, 1);
@@ -2041,6 +1899,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->IncreaseSPDelta(1);
}
break;
+ case kMips64Peek: {
+ // The incoming value is 0-based, but we need a 1-based value.
+ int reverse_slot = MiscField::decode(instr->opcode()) + 1;
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ Ldc1(i.OutputDoubleRegister(), MemOperand(fp, offset));
+ } else {
+ DCHECK_EQ(op->representation(), MachineRepresentation::kFloat32);
+ __ lwc1(i.OutputSingleRegister(0), MemOperand(fp, offset));
+ }
+ } else {
+ __ Ld(i.OutputRegister(0), MemOperand(fp, offset));
+ }
+ break;
+ }
case kMips64StackClaim: {
__ Dsubu(sp, sp, Operand(i.InputInt32(0)));
frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
@@ -2063,48 +1939,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ dsrl32(i.OutputRegister(0), i.OutputRegister(0), 0);
break;
}
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Lb);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Lbu);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Lh);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Lhu);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Lw);
- break;
- case kCheckedLoadWord64:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Ld);
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Single, Lwc1);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Double, Ldc1);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(Sb);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(Sh);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(Sw);
- break;
- case kCheckedStoreWord64:
- ASSEMBLE_CHECKED_STORE_INTEGER(Sd);
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(Single, Swc1);
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(Double, Sdc1);
- break;
case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
break;
@@ -2183,7 +2017,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Xor, Xor)
#undef ATOMIC_BINOP_CASE
case kMips64AssertEqual:
- __ Assert(eq, static_cast<BailoutReason>(i.InputOperand(2).immediate()),
+ __ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
i.InputRegister(0), Operand(i.InputRegister(1)));
break;
case kMips64S128Zero: {
@@ -2889,7 +2723,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (src0 == src1) {
// Unary S32x4 shuffles are handled with shf.w instruction
- unsigned lane = shuffle & 0xff;
+ unsigned lane = shuffle & 0xFF;
if (FLAG_debug_code) {
// range of all four lanes, for unary instruction,
// should belong to the same range, which can be one of these:
@@ -2897,7 +2731,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (lane >= 4) {
int32_t shuffle_helper = shuffle;
for (int i = 0; i < 4; ++i) {
- lane = shuffle_helper & 0xff;
+ lane = shuffle_helper & 0xFF;
CHECK_GE(lane, 4);
shuffle_helper >>= 8;
}
@@ -2905,7 +2739,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
uint32_t i8 = 0;
for (int i = 0; i < 4; i++) {
- lane = shuffle & 0xff;
+ lane = shuffle & 0xFF;
if (lane >= 4) {
lane -= 4;
}
@@ -3465,7 +3299,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
}
}
@@ -3747,7 +3581,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
@@ -3760,10 +3594,12 @@ void CodeGenerator::AssembleConstructFrame() {
const RegList saves = descriptor->CalleeSavedRegisters();
const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ const int returns = frame()->GetReturnSlotCount();
- // Skip callee-saved slots, which are pushed below.
+ // Skip callee-saved and return slots, which are pushed below.
shrink_slots -= base::bits::CountPopulation(saves);
shrink_slots -= base::bits::CountPopulation(saves_fpu);
+ shrink_slots -= returns;
if (shrink_slots > 0) {
__ Dsubu(sp, sp, Operand(shrink_slots * kPointerSize));
}
@@ -3779,11 +3615,21 @@ void CodeGenerator::AssembleConstructFrame() {
__ MultiPush(saves);
DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation(saves) + 1);
}
+
+ if (returns != 0) {
+ // Create space for returns.
+ __ Dsubu(sp, sp, Operand(returns * kPointerSize));
+ }
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ __ Daddu(sp, sp, Operand(returns * kPointerSize));
+ }
+
// Restore GP registers.
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
@@ -3816,7 +3662,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
int pop_count = static_cast<int>(descriptor->StackParameterCount());
if (pop->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
pop_count += g.ToConstant(pop).ToInt32();
} else {
Register pop_reg = g.ToRegister(pop);
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index 1b420d3819..3058812bec 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -156,6 +156,7 @@ namespace compiler {
V(Mips64Float64Min) \
V(Mips64Float64SilenceNaN) \
V(Mips64Push) \
+ V(Mips64Peek) \
V(Mips64StoreToStackSlot) \
V(Mips64ByteSwap64) \
V(Mips64ByteSwap32) \
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 0b490c7d77..38f077c4e6 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -36,7 +36,7 @@ class Mips64OperandGenerator final : public OperandGenerator {
InstructionOperand UseRegisterOrImmediateZero(Node* node) {
if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
(IsFloatConstant(node) &&
- (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
+ (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
return UseImmediate(node);
}
return UseRegister(node);
@@ -106,20 +106,6 @@ class Mips64OperandGenerator final : public OperandGenerator {
case kMips64Swc1:
case kMips64Ldc1:
case kMips64Sdc1:
- case kCheckedLoadInt8:
- case kCheckedLoadUint8:
- case kCheckedLoadInt16:
- case kCheckedLoadUint16:
- case kCheckedLoadWord32:
- case kCheckedLoadWord64:
- case kCheckedStoreWord8:
- case kCheckedStoreWord16:
- case kCheckedStoreWord32:
- case kCheckedStoreWord64:
- case kCheckedLoadFloat32:
- case kCheckedLoadFloat64:
- case kCheckedStoreFloat32:
- case kCheckedStoreFloat64:
return is_int32(value);
default:
return is_int16(value);
@@ -329,7 +315,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -540,7 +527,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
// Any shift value can match; int32 shifts use `value % 32`.
- uint32_t lsb = mleft.right().Value() & 0x1f;
+ uint32_t lsb = mleft.right().Value() & 0x1F;
// Ext cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -590,7 +577,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
// Any shift value can match; int64 shifts use `value % 64`.
- uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);
+ uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3F);
// Dext cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -720,7 +707,7 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x1f;
+ uint32_t lsb = m.right().Value() & 0x1F;
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ext for Shr(And(x, mask), imm) where the result of the mask is
@@ -813,7 +800,7 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
void InstructionSelector::VisitWord64Shr(Node* node) {
Int64BinopMatcher m(node);
if (m.left().IsWord64And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x3f;
+ uint32_t lsb = m.right().Value() & 0x3F;
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Dext for Shr(And(x, mask), imm) where the result of the mask is
@@ -1676,7 +1663,7 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
int slot = kCArgSlotCount;
for (PushParameter input : (*arguments)) {
- Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(slot << kPointerSizeLog2));
++slot;
}
@@ -1688,14 +1675,36 @@ void InstructionSelector::EmitPrepareArguments(
}
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
- if (input.node()) {
- Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ if (input.node) {
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(static_cast<int>(n << kPointerSizeLog2)));
}
}
}
}
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ Mips64OperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ // Skip any alignment holes in nodes.
+ if (output.node != nullptr) {
+ DCHECK(!descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ InstructionOperand result = g.DefineAsRegister(output.node);
+ Emit(kMips64Peek | MiscField::encode(reverse_slot), result);
+ }
+ reverse_slot += output.location.GetSizeInPointers();
+ }
+}
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
@@ -1806,127 +1815,6 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
}
}
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- Mips64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedLoadWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit:
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged:
- case MachineRepresentation::kSimd128:
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
-
- InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
- ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
-
- if (length->opcode() == IrOpcode::kInt32Constant) {
- Int32Matcher m(length);
- if (m.IsPowerOf2()) {
- Emit(opcode, g.DefineAsRegister(node), offset_operand,
- g.UseImmediate(length), g.UseRegister(buffer));
- return;
- }
- }
-
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), offset_operand, length_operand,
- g.UseRegister(buffer));
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- Mips64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedStoreWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit:
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged:
- case MachineRepresentation::kSimd128:
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
-
- InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
- ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
-
- if (length->opcode() == IrOpcode::kInt32Constant) {
- Int32Matcher m(length);
- if (m.IsPowerOf2()) {
- Emit(opcode, g.NoOutput(), offset_operand, g.UseImmediate(length),
- g.UseRegisterOrImmediateZero(value), g.UseRegister(buffer));
- return;
- }
- }
-
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- offset_operand, length_operand, g.UseRegisterOrImmediateZero(value),
- g.UseRegister(buffer));
-}
-
-
namespace {
// Shared routine for multiple compare operations.
@@ -1940,7 +1828,8 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -2111,7 +2000,8 @@ void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
selector->Emit(
kMips64AssertEqual, g.NoOutput(), optimizedResult, fullResult,
- g.TempImmediate(BailoutReason::kUnsupportedNonPrimitiveCompare));
+ g.TempImmediate(
+ static_cast<int>(AbortReason::kUnsupportedNonPrimitiveCompare)));
}
VisitWordCompare(selector, node, opcode, cont, false);
@@ -2157,7 +2047,7 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
g.TempImmediate(0), cont->kind(), cont->reason(),
- cont->frame_state());
+ cont->feedback(), cont->frame_state());
} else if (cont->IsTrap()) {
selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
g.TempImmediate(cont->trap_id()));
@@ -2297,14 +2187,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2750,6 +2640,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
+void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+
#define SIMD_TYPE_LIST(V) \
V(F32x4) \
V(I32x4) \
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index e312dc4354..22004337eb 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -12,6 +12,7 @@
#include "src/compiler/simplified-operator.h"
#include "src/compiler/verifier.h"
#include "src/handles-inl.h"
+#include "src/zone/zone-handle-set.h"
namespace v8 {
namespace internal {
@@ -462,6 +463,20 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
if (IsSame(receiver, effect)) receiver = GetValueInput(effect, 0);
break;
}
+ case IrOpcode::kEffectPhi: {
+ Node* control = GetControlInput(effect);
+ if (control->opcode() != IrOpcode::kLoop) {
+ DCHECK(control->opcode() == IrOpcode::kDead ||
+ control->opcode() == IrOpcode::kMerge);
+ return kNoReceiverMaps;
+ }
+
+ // Continue search for receiver map outside the loop. Since operations
+ // inside the loop may change the map, the result is unreliable.
+ effect = GetEffectInput(effect, 0);
+ result = kUnreliableReceiverMaps;
+ continue;
+ }
default: {
DCHECK_EQ(1, effect->op()->EffectOutputCount());
if (effect->op()->EffectInputCount() != 1) {
@@ -488,6 +503,19 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
}
// static
+MaybeHandle<Map> NodeProperties::GetMapWitness(Node* node) {
+ ZoneHandleSet<Map> maps;
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &maps);
+ if (result == NodeProperties::kReliableReceiverMaps && maps.size() == 1) {
+ return maps[0];
+ }
+ return MaybeHandle<Map>();
+}
+
+// static
bool NodeProperties::NoObservableSideEffectBetween(Node* effect,
Node* dominator) {
while (effect != dominator) {
@@ -538,19 +566,19 @@ bool NodeProperties::CanBePrimitive(Node* receiver, Node* effect) {
bool NodeProperties::CanBeNullOrUndefined(Node* receiver, Node* effect) {
if (CanBePrimitive(receiver, effect)) {
switch (receiver->opcode()) {
- case IrOpcode::kCheckSmi:
+ case IrOpcode::kCheckInternalizedString:
case IrOpcode::kCheckNumber:
- case IrOpcode::kCheckSymbol:
- case IrOpcode::kCheckString:
case IrOpcode::kCheckSeqString:
- case IrOpcode::kCheckInternalizedString:
- case IrOpcode::kToBoolean:
+ case IrOpcode::kCheckSmi:
+ case IrOpcode::kCheckString:
+ case IrOpcode::kCheckSymbol:
case IrOpcode::kJSToInteger:
case IrOpcode::kJSToLength:
case IrOpcode::kJSToName:
case IrOpcode::kJSToNumber:
case IrOpcode::kJSToNumeric:
case IrOpcode::kJSToString:
+ case IrOpcode::kToBoolean:
return false;
case IrOpcode::kHeapConstant: {
Handle<HeapObject> value = HeapObjectMatcher(receiver).Value();
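
The new kEffectPhi case above lets InferReceiverMaps keep walking the effect chain past a loop header, but downgrades the answer to "unreliable" because operations in the loop body may change the map. A self-contained sketch of that walk, with simplified stand-in nodes (not the real TurboFan graph types):

#include <cstdio>

// Simplified stand-ins for TurboFan effect-chain nodes (illustrative only).
enum class Op { kMapCheck, kCall, kEffectPhiLoop, kStart };
struct Node {
  Op op;
  Node* effect_input;  // single effect predecessor (nullptr for Start)
};

enum class Result { kNoMaps, kReliable, kUnreliable };

// Walk the effect chain upwards looking for a map check, mirroring the
// EffectPhi case added above: a loop phi does not stop the search, but any
// map found beyond it is only unreliable.
Result InferMap(Node* effect) {
  Result result = Result::kReliable;
  while (effect != nullptr) {
    switch (effect->op) {
      case Op::kMapCheck:
        return result;
      case Op::kEffectPhiLoop:
        result = Result::kUnreliable;  // continue the search outside the loop
        effect = effect->effect_input;
        break;
      case Op::kCall:
        return Result::kNoMaps;        // arbitrary side effects, give up
      case Op::kStart:
        return Result::kNoMaps;
    }
  }
  return Result::kNoMaps;
}

int main() {
  Node start{Op::kStart, nullptr};
  Node check{Op::kMapCheck, &start};
  Node loop_phi{Op::kEffectPhiLoop, &check};
  std::printf("%d\n", static_cast<int>(InferMap(&loop_phi)));  // 2 = unreliable
  return 0;
}
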
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 5ccc15c1ab..abc6622c83 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -8,6 +8,7 @@
#include "src/compiler/node.h"
#include "src/compiler/types.h"
#include "src/globals.h"
+#include "src/objects/map.h"
#include "src/zone/zone-handle-set.h"
namespace v8 {
@@ -153,6 +154,8 @@ class V8_EXPORT_PRIVATE NodeProperties final {
static InferReceiverMapsResult InferReceiverMaps(
Node* receiver, Node* effect, ZoneHandleSet<Map>* maps_return);
+ static MaybeHandle<Map> GetMapWitness(Node* node);
+
// Walks up the {effect} chain to check that there's no observable side-effect
  // between the {effect} and its {dominator}. Aborts the walk if there's a join

// in the effect chain.
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 3c3650b8f4..ec6c720af2 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -175,24 +175,25 @@
V(JSConstructWithArrayLike) \
V(JSConstructWithSpread)
-#define JS_OTHER_OP_LIST(V) \
- JS_CONSTRUCT_OP_LIST(V) \
- V(JSCallForwardVarargs) \
- V(JSCall) \
- V(JSCallWithArrayLike) \
- V(JSCallWithSpread) \
- V(JSCallRuntime) \
- V(JSForInEnumerate) \
- V(JSForInNext) \
- V(JSForInPrepare) \
- V(JSLoadMessage) \
- V(JSStoreMessage) \
- V(JSLoadModule) \
- V(JSStoreModule) \
- V(JSGeneratorStore) \
- V(JSGeneratorRestoreContinuation) \
- V(JSGeneratorRestoreRegister) \
- V(JSStackCheck) \
+#define JS_OTHER_OP_LIST(V) \
+ JS_CONSTRUCT_OP_LIST(V) \
+ V(JSCallForwardVarargs) \
+ V(JSCall) \
+ V(JSCallWithArrayLike) \
+ V(JSCallWithSpread) \
+ V(JSCallRuntime) \
+ V(JSForInEnumerate) \
+ V(JSForInNext) \
+ V(JSForInPrepare) \
+ V(JSLoadMessage) \
+ V(JSStoreMessage) \
+ V(JSLoadModule) \
+ V(JSStoreModule) \
+ V(JSGeneratorStore) \
+ V(JSGeneratorRestoreContinuation) \
+ V(JSGeneratorRestoreRegister) \
+ V(JSGeneratorRestoreInputOrDebugPos) \
+ V(JSStackCheck) \
V(JSDebugger)
#define JS_OP_LIST(V) \
@@ -317,6 +318,7 @@
V(NumberTrunc) \
V(NumberToBoolean) \
V(NumberToInt32) \
+ V(NumberToString) \
V(NumberToUint32) \
V(NumberToUint8Clamped) \
V(NumberSilenceNaN)
@@ -332,9 +334,12 @@
V(StringCharAt) \
V(StringCharCodeAt) \
V(SeqStringCharCodeAt) \
+ V(StringCodePointAt) \
+ V(SeqStringCodePointAt) \
V(StringFromCharCode) \
V(StringFromCodePoint) \
V(StringIndexOf) \
+ V(StringLength) \
V(StringToLowerCaseIntl) \
V(StringToUpperCaseIntl) \
V(CheckBounds) \
@@ -371,6 +376,7 @@
V(TransitionAndStoreNumberElement) \
V(TransitionAndStoreNonNumberElement) \
V(ToBoolean) \
+ V(NumberIsFloat64Hole) \
V(ObjectIsArrayBufferView) \
V(ObjectIsBigInt) \
V(ObjectIsCallable) \
@@ -390,6 +396,7 @@
V(NewDoubleElements) \
V(NewSmiOrObjectElements) \
V(NewArgumentsElements) \
+ V(NewConsString) \
V(ArrayBufferWasNeutered) \
V(EnsureWritableFastElements) \
V(MaybeGrowFastElements) \
@@ -593,8 +600,6 @@
V(LoadStackPointer) \
V(LoadFramePointer) \
V(LoadParentFramePointer) \
- V(CheckedLoad) \
- V(CheckedStore) \
V(UnalignedLoad) \
V(UnalignedStore) \
V(Int32PairAdd) \
@@ -614,6 +619,7 @@
V(AtomicAnd) \
V(AtomicOr) \
V(AtomicXor) \
+ V(SpeculationFence) \
V(UnsafePointerAdd)
#define MACHINE_SIMD_OP_LIST(V) \
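
The opcode tables edited above are X-macro lists: each V(Name) entry is expanded several times with different definitions of V (enum declarations, name tables, switches, and so on), which is why adding an opcode such as NumberToString is a one-line change. A tiny standalone example of the technique, with made-up opcode names:

#include <cstdio>

#define DEMO_OP_LIST(V) \
  V(NumberToString)     \
  V(StringLength)       \
  V(NewConsString)

enum class DemoOpcode {
#define DECLARE_ENUM(Name) k##Name,
  DEMO_OP_LIST(DECLARE_ENUM)
#undef DECLARE_ENUM
};

const char* DemoOpcodeName(DemoOpcode op) {
  switch (op) {
#define NAME_CASE(Name) \
  case DemoOpcode::k##Name: \
    return #Name;
    DEMO_OP_LIST(NAME_CASE)
#undef NAME_CASE
  }
  return "unknown";
}

int main() {
  std::printf("%s\n", DemoOpcodeName(DemoOpcode::kStringLength));
  return 0;
}
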
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index 46d6557b21..5819655633 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -24,6 +24,8 @@ OperationTyper::OperationTyper(Isolate* isolate, Zone* zone)
Type* truncating_to_zero = Type::MinusZeroOrNaN();
DCHECK(!truncating_to_zero->Maybe(Type::Integral32()));
+ singleton_NaN_string_ = Type::HeapConstant(factory->NaN_string(), zone);
+ singleton_zero_string_ = Type::HeapConstant(factory->zero_string(), zone);
singleton_false_ = Type::HeapConstant(factory->false_value(), zone);
singleton_true_ = Type::HeapConstant(factory->true_value(), zone);
singleton_the_hole_ = Type::HeapConstant(factory->the_hole_value(), zone);
@@ -503,6 +505,14 @@ Type* OperationTyper::NumberToInt32(Type* type) {
return Type::Signed32();
}
+Type* OperationTyper::NumberToString(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+
+ if (type->Is(Type::NaN())) return singleton_NaN_string_;
+ if (type->Is(cache_.kZeroOrMinusZero)) return singleton_zero_string_;
+ return Type::SeqString();
+}
+
Type* OperationTyper::NumberToUint32(Type* type) {
DCHECK(type->Is(Type::Number()));
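
The new OperationTyper::NumberToString above only commits to a concrete string constant when the input type forces one ("NaN" for NaN, "0" for plus/minus zero) and otherwise falls back to "some sequential string". A standalone sketch of the rule, with the input lattice reduced to an enum for illustration:

#include <cstdio>

// Stand-in for the typer's input lattice (illustration only).
enum class NumberType { kNaN, kZeroOrMinusZero, kOtherNumber };

// Only NaN and (-)0 map to singleton string constants; everything else is
// typed as an unspecific sequential string.
const char* TypeNumberToString(NumberType t) {
  switch (t) {
    case NumberType::kNaN:             return "HeapConstant(\"NaN\")";
    case NumberType::kZeroOrMinusZero: return "HeapConstant(\"0\")";
    case NumberType::kOtherNumber:     return "SeqString";
  }
  return "SeqString";
}

int main() {
  std::printf("%s\n", TypeNumberToString(NumberType::kNaN));
  std::printf("%s\n", TypeNumberToString(NumberType::kZeroOrMinusZero));
  return 0;
}
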
diff --git a/deps/v8/src/compiler/operation-typer.h b/deps/v8/src/compiler/operation-typer.h
index 4a9c4ffb08..282cb0c750 100644
--- a/deps/v8/src/compiler/operation-typer.h
+++ b/deps/v8/src/compiler/operation-typer.h
@@ -94,6 +94,8 @@ class V8_EXPORT_PRIVATE OperationTyper {
Type* infinity_;
Type* minus_infinity_;
+ Type* singleton_NaN_string_;
+ Type* singleton_zero_string_;
Type* singleton_false_;
Type* singleton_true_;
Type* singleton_the_hole_;
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 045d695ecf..b4567ab04f 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -253,11 +253,6 @@ class PipelineData {
source_position_output_ = source_position_output;
}
- std::vector<trap_handler::ProtectedInstructionData>* protected_instructions()
- const {
- return protected_instructions_;
- }
-
JumpOptimizationInfo* jump_optimization_info() const {
return jump_optimization_info_;
}
@@ -435,21 +430,26 @@ class PipelineImpl final {
template <typename Phase, typename Arg0, typename Arg1>
void Run(Arg0 arg_0, Arg1 arg_1);
- // Run the graph creation and initial optimization passes.
+ // Step A. Run the graph creation and initial optimization passes.
bool CreateGraph();
- // Run the concurrent optimization passes.
+ // B. Run the concurrent optimization passes.
bool OptimizeGraph(Linkage* linkage);
- // Run the code assembly pass.
+ // Substep B.1. Produce a scheduled graph.
+ void ComputeScheduledGraph();
+
+ // Substep B.2. Select instructions from a scheduled graph.
+ bool SelectInstructions(Linkage* linkage);
+
+ // Step C. Run the code assembly pass.
void AssembleCode(Linkage* linkage);
- // Run the code finalization pass.
+ // Step D. Run the code finalization pass.
Handle<Code> FinalizeCode();
- bool ScheduleAndSelectInstructions(Linkage* linkage, bool trim_graph);
void RunPrintAndVerify(const char* phase, bool untyped = false);
- Handle<Code> ScheduleAndGenerateCode(CallDescriptor* call_descriptor);
+ Handle<Code> GenerateCode(CallDescriptor* call_descriptor);
void AllocateRegisters(const RegisterConfiguration* config,
CallDescriptor* descriptor, bool run_verifier);
@@ -803,7 +803,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (!pipeline_.CreateGraph()) {
if (isolate->has_pending_exception()) return FAILED; // Stack overflowed.
- return AbortOptimization(kGraphBuildingFailed);
+ return AbortOptimization(BailoutReason::kGraphBuildingFailed);
}
if (compilation_info()->is_osr()) data_.InitializeOsrHelper();
@@ -826,8 +826,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
Isolate* isolate) {
Handle<Code> code = pipeline_.FinalizeCode();
if (code.is_null()) {
- if (compilation_info()->bailout_reason() == kNoReason) {
- return AbortOptimization(kCodeGenerationFailed);
+ if (compilation_info()->bailout_reason() == BailoutReason::kNoReason) {
+ return AbortOptimization(BailoutReason::kCodeGenerationFailed);
}
return FAILED;
}
@@ -964,7 +964,8 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
pipeline_.RunPrintAndVerify("Optimized Machine", true);
}
- if (!pipeline_.ScheduleAndSelectInstructions(&linkage_, true)) return FAILED;
+ pipeline_.ComputeScheduledGraph();
+ if (!pipeline_.SelectInstructions(&linkage_)) return FAILED;
pipeline_.AssembleCode(&linkage_);
return SUCCEEDED;
}
@@ -995,9 +996,7 @@ PipelineWasmCompilationJob::Status PipelineWasmCompilationJob::FinalizeJobImpl(
}
void PipelineWasmCompilationJob::ValidateImmovableEmbeddedObjects() const {
-#if !DEBUG
- return;
-#endif
+#if DEBUG
// We expect the only embedded objects to be those originating from
// a snapshot, which are immovable.
DisallowHeapAllocation no_gc;
@@ -1038,6 +1037,7 @@ void PipelineWasmCompilationJob::ValidateImmovableEmbeddedObjects() const {
}
CHECK(is_immovable || is_wasm || is_allowed_stub);
}
+#endif
}
template <typename Phase>
@@ -1269,8 +1269,9 @@ struct LoopPeelingPhase {
LoopTree* loop_tree =
LoopFinder::BuildLoopTree(data->jsgraph()->graph(), temp_zone);
- LoopPeeler::PeelInnerLoopsOfTree(data->graph(), data->common(), loop_tree,
- temp_zone);
+ LoopPeeler(data->graph(), data->common(), loop_tree, temp_zone,
+ data->source_positions())
+ .PeelInnerLoopsOfTree();
}
};
@@ -1880,7 +1881,8 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
if (FLAG_turbo_escape) {
Run<EscapeAnalysisPhase>();
if (data->compilation_failed()) {
- info()->AbortOptimization(kCyclicObjectStateDetectedInEscapeAnalysis);
+ info()->AbortOptimization(
+ BailoutReason::kCyclicObjectStateDetectedInEscapeAnalysis);
data->EndPhaseKind();
return false;
}
@@ -1941,7 +1943,9 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
data->source_positions()->RemoveDecorator();
- return ScheduleAndSelectInstructions(linkage, true);
+ ComputeScheduledGraph();
+
+ return SelectInstructions(linkage);
}
Handle<Code> Pipeline::GenerateCodeForCodeStub(
@@ -1982,7 +1986,7 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(
}
pipeline.Run<VerifyGraphPhase>(false, true);
- return pipeline.ScheduleAndGenerateCode(call_descriptor);
+ return pipeline.GenerateCode(call_descriptor);
}
// static
@@ -2043,7 +2047,12 @@ Handle<Code> Pipeline::GenerateCodeForTesting(
// TODO(rossberg): Should this really be untyped?
pipeline.RunPrintAndVerify("Machine", true);
- return pipeline.ScheduleAndGenerateCode(call_descriptor);
+ // Ensure we have a schedule.
+ if (data.schedule() == nullptr) {
+ pipeline.ComputeScheduledGraph();
+ }
+
+ return pipeline.GenerateCode(call_descriptor);
}
// static
@@ -2082,19 +2091,26 @@ bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
return !data.compilation_failed();
}
-bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
- bool trim_graph) {
- CallDescriptor* call_descriptor = linkage->GetIncomingDescriptor();
+void PipelineImpl::ComputeScheduledGraph() {
PipelineData* data = this->data_;
- DCHECK_NOT_NULL(data->graph());
+ // We should only schedule the graph if it is not scheduled yet.
+ DCHECK_NULL(data->schedule());
- if (trim_graph) {
- Run<LateGraphTrimmingPhase>();
- RunPrintAndVerify("Late trimmed", true);
- }
- if (data->schedule() == nullptr) Run<ComputeSchedulePhase>();
+ Run<LateGraphTrimmingPhase>();
+ RunPrintAndVerify("Late trimmed", true);
+
+ Run<ComputeSchedulePhase>();
TraceSchedule(data->info(), data->isolate(), data->schedule());
+}
+
+bool PipelineImpl::SelectInstructions(Linkage* linkage) {
+ CallDescriptor* call_descriptor = linkage->GetIncomingDescriptor();
+ PipelineData* data = this->data_;
+
+ // We should have a scheduled graph.
+ DCHECK_NOT_NULL(data->graph());
+ DCHECK_NOT_NULL(data->schedule());
if (FLAG_turbo_profiling) {
data->set_profiler_data(BasicBlockInstrumentor::Instrument(
@@ -2138,7 +2154,7 @@ bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
// Select and schedule instructions covering the scheduled graph.
Run<InstructionSelectionPhase>(linkage);
if (data->compilation_failed()) {
- info()->AbortOptimization(kCodeGenerationFailed);
+ info()->AbortOptimization(BailoutReason::kCodeGenerationFailed);
data->EndPhaseKind();
return false;
}
@@ -2177,7 +2193,8 @@ bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
Run<FrameElisionPhase>();
if (data->compilation_failed()) {
- info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
+ info()->AbortOptimization(
+ BailoutReason::kNotEnoughVirtualRegistersRegalloc);
data->EndPhaseKind();
return false;
}
@@ -2208,6 +2225,8 @@ Handle<Code> PipelineImpl::FinalizeCode() {
Run<FinalizeCodePhase>();
Handle<Code> code = data->code();
+ if (code.is_null()) return code;
+
if (data->profiler_data()) {
#if ENABLE_DISASSEMBLER
std::ostringstream os;
@@ -2245,12 +2264,11 @@ Handle<Code> PipelineImpl::FinalizeCode() {
return code;
}
-Handle<Code> PipelineImpl::ScheduleAndGenerateCode(
- CallDescriptor* call_descriptor) {
+Handle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
Linkage linkage(call_descriptor);
- // Schedule the graph, perform instruction selection and register allocation.
- if (!ScheduleAndSelectInstructions(&linkage, false)) return Handle<Code>();
+ // Perform instruction selection and register allocation.
+ if (!SelectInstructions(&linkage)) return Handle<Code>();
// Generate the final machine code.
AssembleCode(&linkage);
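
The pipeline hunks above split the old ScheduleAndSelectInstructions(linkage, trim_graph) into an explicit ComputeScheduledGraph step and a separate SelectInstructions step, so callers that already hold a schedule can skip scheduling. A minimal sketch of the shape of that refactoring, with stand-in state instead of the real PipelineImpl/PipelineData:

#include <cassert>
#include <cstdio>

struct PipelineState {
  bool has_graph = true;
  bool has_schedule = false;
};

void ComputeScheduledGraph(PipelineState* data) {
  assert(!data->has_schedule);  // only schedule a graph once
  // ... trim the graph, then compute the schedule ...
  data->has_schedule = true;
}

bool SelectInstructions(PipelineState* data) {
  assert(data->has_graph && data->has_schedule);  // schedule is a precondition
  // ... instruction selection, frame layout, register allocation ...
  return true;
}

int main() {
  PipelineState data;
  // As in GenerateCodeForTesting above: only schedule if no schedule exists.
  if (!data.has_schedule) ComputeScheduledGraph(&data);
  bool ok = SelectInstructions(&data);
  std::printf("selected: %d\n", ok);
  return 0;
}
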
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 2dca7794eb..b5b6b5f142 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -76,7 +76,7 @@ class Pipeline : public AllStatic {
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
- static Handle<Code> GenerateCodeForTesting(
+ V8_EXPORT_PRIVATE static Handle<Code> GenerateCodeForTesting(
CompilationInfo* info, Isolate* isolate, CallDescriptor* call_descriptor,
Graph* graph, Schedule* schedule = nullptr,
SourcePositionTable* source_positions = nullptr);
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 11fde27fc9..7fc537784c 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -118,48 +118,6 @@ static inline bool HasRegisterInput(Instruction* instr, size_t index) {
namespace {
-class OutOfLineLoadNAN32 final : public OutOfLineCode {
- public:
- OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ LoadDoubleLiteral(
- result_, Double(std::numeric_limits<double>::quiet_NaN()), kScratchReg);
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineLoadNAN64 final : public OutOfLineCode {
- public:
- OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ LoadDoubleLiteral(
- result_, Double(std::numeric_limits<double>::quiet_NaN()), kScratchReg);
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineLoadZero final : public OutOfLineCode {
- public:
- OutOfLineLoadZero(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ li(result_, Operand::Zero()); }
-
- private:
- Register const result_;
-};
-
-
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
@@ -653,134 +611,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
#define CleanUInt32(x)
#endif
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width) \
- do { \
- DoubleRegister result = i.OutputDoubleRegister(); \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- DCHECK_EQ(kMode_MRR, mode); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ cmplw(offset, i.InputRegister(2)); \
- } else { \
- __ cmplwi(offset, i.InputImmediate(2)); \
- } \
- auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
- __ bge(ool->entry()); \
- if (mode == kMode_MRI) { \
- __ asm_instr(result, operand); \
- } else { \
- CleanUInt32(offset); \
- __ asm_instrx(result, operand); \
- } \
- __ bind(ool->exit()); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \
- do { \
- Register result = i.OutputRegister(); \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- DCHECK_EQ(kMode_MRR, mode); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ cmplw(offset, i.InputRegister(2)); \
- } else { \
- __ cmplwi(offset, i.InputImmediate(2)); \
- } \
- auto ool = new (zone()) OutOfLineLoadZero(this, result); \
- __ bge(ool->entry()); \
- if (mode == kMode_MRI) { \
- __ asm_instr(result, operand); \
- } else { \
- CleanUInt32(offset); \
- __ asm_instrx(result, operand); \
- } \
- __ bind(ool->exit()); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT32() \
- do { \
- Label done; \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- DCHECK_EQ(kMode_MRR, mode); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ cmplw(offset, i.InputRegister(2)); \
- } else { \
- __ cmplwi(offset, i.InputImmediate(2)); \
- } \
- __ bge(&done); \
- DoubleRegister value = i.InputDoubleRegister(3); \
- __ frsp(kScratchDoubleReg, value); \
- /* removed frsp as instruction-selector checked */ \
- /* value to be kFloat32 */ \
- if (mode == kMode_MRI) { \
- __ stfs(value, operand); \
- } else { \
- CleanUInt32(offset); \
- __ stfsx(value, operand); \
- } \
- __ bind(&done); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_DOUBLE() \
- do { \
- Label done; \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- DCHECK_EQ(kMode_MRR, mode); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ cmplw(offset, i.InputRegister(2)); \
- } else { \
- __ cmplwi(offset, i.InputImmediate(2)); \
- } \
- __ bge(&done); \
- DoubleRegister value = i.InputDoubleRegister(3); \
- if (mode == kMode_MRI) { \
- __ stfd(value, operand); \
- } else { \
- CleanUInt32(offset); \
- __ stfdx(value, operand); \
- } \
- __ bind(&done); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \
- do { \
- Label done; \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- DCHECK_EQ(kMode_MRR, mode); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ cmplw(offset, i.InputRegister(2)); \
- } else { \
- __ cmplwi(offset, i.InputImmediate(2)); \
- } \
- __ bge(&done); \
- Register value = i.InputRegister(3); \
- if (mode == kMode_MRI) { \
- __ asm_instr(value, operand); \
- } else { \
- CleanUInt32(offset); \
- __ asm_instrx(value, operand); \
- } \
- __ bind(&done); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
- } while (0)
-
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, asm_instrx) \
do { \
Label done; \
@@ -1003,8 +833,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
if (instr->InputAt(0)->IsImmediate()) {
+#ifdef V8_TARGET_ARCH_PPC64
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt64());
+#else
Address wasm_code = reinterpret_cast<Address>(
i.ToConstant(instr->InputAt(0)).ToInt32());
+#endif
__ Call(wasm_code, rmode);
} else {
__ Call(i.InputRegister(0));
@@ -1072,7 +907,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ LoadP(kScratchReg,
FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, kScratchReg);
- __ Assert(eq, kWrongFunctionContext);
+ __ Assert(eq, AbortReason::kWrongFunctionContext);
}
__ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeOffset));
__ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -2021,58 +1856,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kPPC_StoreDouble:
ASSEMBLE_STORE_DOUBLE();
break;
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
- __ extsb(i.OutputRegister(), i.OutputRegister());
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lha, lhax);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lhz, lhzx);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lwz, lwzx);
- break;
- case kCheckedLoadWord64:
-#if V8_TARGET_ARCH_PPC64
- ASSEMBLE_CHECKED_LOAD_INTEGER(ld, ldx);
-#else
- UNREACHABLE();
-#endif
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(lfs, lfsx, 32);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(lfd, lfdx, 64);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(stb, stbx);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(sth, sthx);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(stw, stwx);
- break;
- case kCheckedStoreWord64:
-#if V8_TARGET_ARCH_PPC64
- ASSEMBLE_CHECKED_STORE_INTEGER(std, stdx);
-#else
- UNREACHABLE();
-#endif
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT32();
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_DOUBLE();
- break;
-
case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
__ extsb(i.OutputRegister(), i.OutputRegister());
@@ -2208,7 +1991,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
}
}
@@ -2382,7 +2165,7 @@ void CodeGenerator::AssembleConstructFrame() {
frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
@@ -2555,10 +2338,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
// converts it to qnan on ia32/x64
if (src.type() == Constant::kFloat32) {
uint32_t val = src.ToFloat32AsInt();
- if ((val & 0x7f800000) == 0x7f800000) {
+ if ((val & 0x7F800000) == 0x7F800000) {
uint64_t dval = static_cast<uint64_t>(val);
- dval = ((dval & 0xc0000000) << 32) | ((dval & 0x40000000) << 31) |
- ((dval & 0x40000000) << 30) | ((dval & 0x7fffffff) << 29);
+ dval = ((dval & 0xC0000000) << 32) | ((dval & 0x40000000) << 31) |
+ ((dval & 0x40000000) << 30) | ((dval & 0x7FFFFFFF) << 29);
value = Double(dval);
} else {
value = Double(static_cast<double>(src.ToFloat32()));
@@ -2672,69 +2455,6 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
return;
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- // Register-register.
- Register temp = kScratchReg;
- Register src = g.ToRegister(source);
- if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ mr(temp, src);
- __ mr(src, dst);
- __ mr(dst, temp);
- } else {
- DCHECK(destination->IsStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ mr(temp, src);
- __ LoadP(src, dst);
- __ StoreP(temp, dst);
- }
-#if V8_TARGET_ARCH_PPC64
- } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
-#else
- } else if (source->IsStackSlot()) {
- DCHECK(destination->IsStackSlot());
-#endif
- Register temp_0 = kScratchReg;
- Register temp_1 = r0;
- MemOperand src = g.ToMemOperand(source);
- MemOperand dst = g.ToMemOperand(destination);
- __ LoadP(temp_0, src);
- __ LoadP(temp_1, dst);
- __ StoreP(temp_0, dst);
- __ StoreP(temp_1, src);
- } else if (source->IsFPRegister()) {
- DoubleRegister temp = kScratchDoubleReg;
- DoubleRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- DoubleRegister dst = g.ToDoubleRegister(destination);
- __ fmr(temp, src);
- __ fmr(src, dst);
- __ fmr(dst, temp);
- } else {
- DCHECK(destination->IsFPStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ fmr(temp, src);
- __ lfd(src, dst);
- __ stfd(temp, dst);
- }
-#if !V8_TARGET_ARCH_PPC64
- } else if (source->IsFPStackSlot()) {
- DCHECK(destination->IsFPStackSlot());
- DoubleRegister temp_0 = kScratchDoubleReg;
- DoubleRegister temp_1 = d0;
- MemOperand src = g.ToMemOperand(source);
- MemOperand dst = g.ToMemOperand(destination);
- __ lfd(temp_0, src);
- __ lfd(temp_1, dst);
- __ stfd(temp_0, dst);
- __ stfd(temp_1, src);
-#endif
- } else {
- // No other combinations are possible.
- UNREACHABLE();
- }
}
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index 8454590ee2..fced5565df 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -154,7 +154,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -366,101 +367,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- PPCOperandGenerator g(this);
- Node* const base = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
-#if V8_TARGET_ARCH_PPC64
- case MachineRepresentation::kWord64:
- opcode = kCheckedLoadWord64;
- break;
-#endif
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
-#if !V8_TARGET_ARCH_PPC64
- case MachineRepresentation::kWord64: // Fall through.
-#endif
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- AddressingMode addressingMode = kMode_MRR;
- Emit(opcode | AddressingModeField::encode(addressingMode),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
- g.UseOperand(length, kInt16Imm_Unsigned));
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- PPCOperandGenerator g(this);
- Node* const base = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
-#if V8_TARGET_ARCH_PPC64
- case MachineRepresentation::kWord64:
- opcode = kCheckedStoreWord64;
- break;
-#endif
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
-#if !V8_TARGET_ARCH_PPC64
- case MachineRepresentation::kWord64: // Fall through.
-#endif
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- AddressingMode addressingMode = kMode_MRR;
- Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(offset),
- g.UseOperand(length, kInt16Imm_Unsigned), g.UseRegister(value));
-}
-
-
template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
ArchOpcode opcode, bool left_can_cover,
@@ -553,7 +459,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
if (m.left().IsWord32Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 31 - sh) mb = 31 - sh;
- sh = (32 - sh) & 0x1f;
+ sh = (32 - sh) & 0x1F;
} else {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
@@ -592,7 +498,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
if (m.left().IsWord64Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
- sh = (64 - sh) & 0x3f;
+ sh = (64 - sh) & 0x3F;
} else {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
@@ -756,7 +662,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 31 - sh) mb = 31 - sh;
- sh = (32 - sh) & 0x1f;
+ sh = (32 - sh) & 0x1F;
if (mb >= me) {
Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
@@ -782,7 +688,7 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
- sh = (64 - sh) & 0x3f;
+ sh = (64 - sh) & 0x3F;
if (mb >= me) {
bool match = false;
ArchOpcode opcode;
@@ -1033,6 +939,8 @@ void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+
void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add32, kInt16Imm);
}
@@ -1553,7 +1461,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1800,14 +1709,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
@@ -1989,7 +1898,7 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
int slot = kStackFrameExtraParamSlot;
for (PushParameter input : (*arguments)) {
- Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(slot));
++slot;
}
@@ -1997,8 +1906,8 @@ void InstructionSelector::EmitPrepareArguments(
// Push any stack arguments.
for (PushParameter input : base::Reversed(*arguments)) {
// Skip any alignment holes in pushed nodes.
- if (input.node() == nullptr) continue;
- Emit(kPPC_Push, g.NoOutput(), g.UseRegister(input.node()));
+ if (input.node == nullptr) continue;
+ Emit(kPPC_Push, g.NoOutput(), g.UseRegister(input.node));
}
}
}
@@ -2164,6 +2073,190 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
+void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Shl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ShrS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MaxS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MinS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MinU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MaxU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GeS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GtU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GeU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8MaxS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GeS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GtU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GeU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16MinS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GeS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GtU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GeU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Zero(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ // TODO(John): Port.
+}
+
+void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -2176,7 +2269,7 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kWord32Popcnt |
MachineOperatorBuilder::kWord64Popcnt;
- // We omit kWord32ShiftIsSafe as s[rl]w use 0x3f as a mask rather than 0x1f.
+ // We omit kWord32ShiftIsSafe as s[rl]w use 0x3F as a mask rather than 0x1F.
}
// static
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index 5e79cbdfec..bead0618f6 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -69,8 +69,9 @@ bool PropertyAccessBuilder::TryBuildStringCheck(MapHandles const& maps,
} else {
 // Monomorphic string access (ignoring the fact that there are multiple
// String maps).
- *receiver = *effect = graph()->NewNode(simplified()->CheckString(),
- *receiver, *effect, control);
+ *receiver = *effect =
+ graph()->NewNode(simplified()->CheckString(VectorSlotPair()),
+ *receiver, *effect, control);
}
return true;
}
@@ -82,8 +83,9 @@ bool PropertyAccessBuilder::TryBuildNumberCheck(MapHandles const& maps,
Node* control) {
if (HasOnlyNumberMaps(maps)) {
// Monomorphic number access (we also deal with Smis here).
- *receiver = *effect = graph()->NewNode(simplified()->CheckNumber(),
- *receiver, *effect, control);
+ *receiver = *effect =
+ graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), *receiver,
+ *effect, control);
return true;
}
return false;
@@ -175,8 +177,9 @@ Node* PropertyAccessBuilder::BuildCheckValue(Node* receiver, Node** effect,
Node* expected = jsgraph()->HeapConstant(value);
Node* check =
graph()->NewNode(simplified()->ReferenceEqual(), receiver, expected);
- *effect = graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
- check, *effect, control);
+ *effect =
+ graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongValue),
+ check, *effect, control);
return expected;
}
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index bed2f628d9..ed67c06cc7 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -134,7 +134,6 @@ void RawMachineAssembler::Return(Node* value) {
current_block_ = nullptr;
}
-
void RawMachineAssembler::Return(Node* v1, Node* v2) {
Node* values[] = {Int32Constant(0), v1, v2};
Node* ret = MakeNode(common()->Return(2), 3, values);
@@ -142,7 +141,6 @@ void RawMachineAssembler::Return(Node* v1, Node* v2) {
current_block_ = nullptr;
}
-
void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) {
Node* values[] = {Int32Constant(0), v1, v2, v3};
Node* ret = MakeNode(common()->Return(3), 4, values);
@@ -150,6 +148,24 @@ void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) {
current_block_ = nullptr;
}
+void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3, Node* v4) {
+ Node* values[] = {Int32Constant(0), v1, v2, v3, v4};
+ Node* ret = MakeNode(common()->Return(4), 5, values);
+ schedule()->AddReturn(CurrentBlock(), ret);
+ current_block_ = nullptr;
+}
+
+void RawMachineAssembler::Return(int count, Node* vs[]) {
+ typedef Node* Node_ptr;
+ Node** values = new Node_ptr[count + 1];
+ values[0] = Int32Constant(0);
+ for (int i = 0; i < count; ++i) values[i + 1] = vs[i];
+ Node* ret = MakeNode(common()->Return(count), count + 1, values);
+ schedule()->AddReturn(CurrentBlock(), ret);
+ current_block_ = nullptr;
+ delete[] values;
+}
+
void RawMachineAssembler::PopAndReturn(Node* pop, Node* value) {
Node* values[] = {pop, value};
Node* ret = MakeNode(common()->Return(1), 2, values);
@@ -172,6 +188,14 @@ void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2,
current_block_ = nullptr;
}
+void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3,
+ Node* v4) {
+ Node* values[] = {pop, v1, v2, v3, v4};
+ Node* ret = MakeNode(common()->Return(4), 5, values);
+ schedule()->AddReturn(CurrentBlock(), ret);
+ current_block_ = nullptr;
+}
+
void RawMachineAssembler::DebugAbort(Node* message) {
AddNode(machine()->DebugAbort(), message);
}
@@ -430,7 +454,7 @@ void RawMachineAssembler::Bind(RawMachineLabel* label,
str << "Binding label without closing previous block:"
<< "\n# label: " << info
<< "\n# previous block: " << *current_block_;
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
Bind(label);
current_block_->set_debug_info(info);
@@ -495,7 +519,7 @@ RawMachineLabel::~RawMachineLabel() {
} else {
str << "A label has been used but it's not bound.";
}
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
#endif // DEBUG
}
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 9fc3590875..1cc56b3379 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -186,6 +186,10 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
old_value, new_value);
}
+ Node* SpeculationFence() {
+ return AddNode(machine()->SpeculationFence().op());
+ }
+
// Arithmetic Operations.
Node* WordAnd(Node* a, Node* b) {
return AddNode(machine()->WordAnd(), a, b);
@@ -828,9 +832,12 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
void Return(Node* value);
void Return(Node* v1, Node* v2);
void Return(Node* v1, Node* v2, Node* v3);
+ void Return(Node* v1, Node* v2, Node* v3, Node* v4);
+ void Return(int count, Node* v[]);
void PopAndReturn(Node* pop, Node* value);
void PopAndReturn(Node* pop, Node* v1, Node* v2);
void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3);
+ void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3, Node* v4);
void Bind(RawMachineLabel* label);
void Deoptimize(Node* state);
void DebugAbort(Node* message);
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
index 3a40e8d5bf..eedf946fb6 100644
--- a/deps/v8/src/compiler/redundancy-elimination.cc
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -5,6 +5,7 @@
#include "src/compiler/redundancy-elimination.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
@@ -19,26 +20,36 @@ Reduction RedundancyElimination::Reduce(Node* node) {
if (node_checks_.Get(node)) return NoChange();
switch (node->opcode()) {
case IrOpcode::kCheckBounds:
+ case IrOpcode::kCheckEqualsInternalizedString:
+ case IrOpcode::kCheckEqualsSymbol:
case IrOpcode::kCheckFloat64Hole:
case IrOpcode::kCheckHeapObject:
case IrOpcode::kCheckIf:
case IrOpcode::kCheckInternalizedString:
+ case IrOpcode::kCheckNotTaggedHole:
case IrOpcode::kCheckNumber:
case IrOpcode::kCheckReceiver:
+ case IrOpcode::kCheckSeqString:
case IrOpcode::kCheckSmi:
case IrOpcode::kCheckString:
- case IrOpcode::kCheckSeqString:
- case IrOpcode::kCheckNotTaggedHole:
+ case IrOpcode::kCheckSymbol:
case IrOpcode::kCheckedFloat64ToInt32:
case IrOpcode::kCheckedInt32Add:
- case IrOpcode::kCheckedInt32Sub:
case IrOpcode::kCheckedInt32Div:
case IrOpcode::kCheckedInt32Mod:
case IrOpcode::kCheckedInt32Mul:
- case IrOpcode::kCheckedTaggedToFloat64:
+ case IrOpcode::kCheckedInt32Sub:
+ case IrOpcode::kCheckedInt32ToTaggedSigned:
case IrOpcode::kCheckedTaggedSignedToInt32:
+ case IrOpcode::kCheckedTaggedToFloat64:
case IrOpcode::kCheckedTaggedToInt32:
+ case IrOpcode::kCheckedTaggedToTaggedPointer:
+ case IrOpcode::kCheckedTaggedToTaggedSigned:
+ case IrOpcode::kCheckedTruncateTaggedToWord32:
+ case IrOpcode::kCheckedUint32Div:
+ case IrOpcode::kCheckedUint32Mod:
case IrOpcode::kCheckedUint32ToInt32:
+ case IrOpcode::kCheckedUint32ToTaggedSigned:
return ReduceCheckNode(node);
case IrOpcode::kSpeculativeNumberAdd:
case IrOpcode::kSpeculativeNumberSubtract:
@@ -124,13 +135,43 @@ RedundancyElimination::EffectPathChecks::AddCheck(Zone* zone,
namespace {
-bool IsCompatibleCheck(Node const* a, Node const* b) {
+// Does check {a} subsume check {b}?
+bool CheckSubsumes(Node const* a, Node const* b) {
if (a->op() != b->op()) {
if (a->opcode() == IrOpcode::kCheckInternalizedString &&
b->opcode() == IrOpcode::kCheckString) {
// CheckInternalizedString(node) implies CheckString(node)
- } else {
+ } else if (a->opcode() != b->opcode()) {
return false;
+ } else {
+ switch (a->opcode()) {
+ case IrOpcode::kCheckBounds:
+ case IrOpcode::kCheckSmi:
+ case IrOpcode::kCheckString:
+ case IrOpcode::kCheckNumber:
+ break;
+ case IrOpcode::kCheckedInt32ToTaggedSigned:
+ case IrOpcode::kCheckedTaggedSignedToInt32:
+ case IrOpcode::kCheckedTaggedToTaggedPointer:
+ case IrOpcode::kCheckedTaggedToTaggedSigned:
+ case IrOpcode::kCheckedUint32ToInt32:
+ case IrOpcode::kCheckedUint32ToTaggedSigned:
+ break;
+ case IrOpcode::kCheckedFloat64ToInt32:
+ case IrOpcode::kCheckedTaggedToInt32: {
+ const CheckMinusZeroParameters& ap =
+ CheckMinusZeroParametersOf(a->op());
+ const CheckMinusZeroParameters& bp =
+ CheckMinusZeroParametersOf(b->op());
+ if (ap.mode() != bp.mode()) {
+ return false;
+ }
+ break;
+ }
+ default:
+ DCHECK(!IsCheckedWithFeedback(a->op()));
+ return false;
+ }
}
}
for (int i = a->op()->ValueInputCount(); --i >= 0;) {
@@ -143,7 +184,7 @@ bool IsCompatibleCheck(Node const* a, Node const* b) {
Node* RedundancyElimination::EffectPathChecks::LookupCheck(Node* node) const {
for (Check const* check = head_; check != nullptr; check = check->next) {
- if (IsCompatibleCheck(check->node, node)) {
+ if (CheckSubsumes(check->node, node)) {
DCHECK(!check->node->IsDead());
return check->node;
}
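
The rename from IsCompatibleCheck to CheckSubsumes above matches the question actually being asked: may an earlier check stand in for a later one on the same inputs? A standalone sketch of the idea with simplified check descriptors (not the real simplified operators):

#include <cstdio>

// Simplified check descriptor (illustrative stand-in for a TurboFan operator).
enum class CheckKind { kCheckString, kCheckInternalizedString, kCheckSmi };
struct Check {
  CheckKind kind;
  int input;  // id of the value being checked
};

// Does check {a} subsume check {b}? Identical checks on the same input do;
// a stronger check (internalized string) also covers a weaker one (string).
bool CheckSubsumes(const Check& a, const Check& b) {
  if (a.input != b.input) return false;
  if (a.kind == b.kind) return true;
  return a.kind == CheckKind::kCheckInternalizedString &&
         b.kind == CheckKind::kCheckString;
}

int main() {
  Check earlier{CheckKind::kCheckInternalizedString, 1};
  Check later{CheckKind::kCheckString, 1};
  std::printf("%d\n", CheckSubsumes(earlier, later));  // 1: later check is redundant
  return 0;
}
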
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index b0a345a57f..f8a5a9c504 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -216,7 +216,9 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
const Operator* op;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kTaggedSigned),
+ node);
} else if (IsWord(output_rep)) {
if (output_type->Is(Type::Signed31())) {
op = simplified()->ChangeInt31ToTaggedSigned();
@@ -224,14 +226,14 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
if (SmiValuesAre32Bits()) {
op = simplified()->ChangeInt32ToTagged();
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
- op = simplified()->CheckedInt32ToTaggedSigned();
+ op = simplified()->CheckedInt32ToTaggedSigned(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedSigned);
}
} else if (output_type->Is(Type::Unsigned32()) &&
use_info.type_check() == TypeCheckKind::kSignedSmall) {
- op = simplified()->CheckedUint32ToTaggedSigned();
+ op = simplified()->CheckedUint32ToTaggedSigned(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedSigned);
@@ -247,7 +249,7 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
if (SmiValuesAre32Bits()) {
op = simplified()->ChangeInt32ToTagged();
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
- op = simplified()->CheckedInt32ToTaggedSigned();
+ op = simplified()->CheckedInt32ToTaggedSigned(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedSigned);
@@ -256,17 +258,18 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
use_info.type_check() == TypeCheckKind::kSignedSmall) {
// float64 -> uint32 -> tagged signed
node = InsertChangeFloat64ToUint32(node);
- op = simplified()->CheckedUint32ToTaggedSigned();
+ op = simplified()->CheckedUint32ToTaggedSigned(use_info.feedback());
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
op = simplified()->CheckedFloat64ToInt32(
output_type->Maybe(Type::MinusZero())
? CheckForMinusZeroMode::kCheckForMinusZero
- : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ : CheckForMinusZeroMode::kDontCheckForMinusZero,
+ use_info.feedback());
node = InsertConversion(node, op, use_node);
if (SmiValuesAre32Bits()) {
op = simplified()->ChangeInt32ToTagged();
} else {
- op = simplified()->CheckedInt32ToTaggedSigned();
+ op = simplified()->CheckedInt32ToTaggedSigned(use_info.feedback());
}
} else {
return TypeError(node, output_rep, output_type,
@@ -279,12 +282,13 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
op = simplified()->CheckedFloat64ToInt32(
output_type->Maybe(Type::MinusZero())
? CheckForMinusZeroMode::kCheckForMinusZero
- : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ : CheckForMinusZeroMode::kDontCheckForMinusZero,
+ use_info.feedback());
node = InsertConversion(node, op, use_node);
if (SmiValuesAre32Bits()) {
op = simplified()->ChangeInt32ToTagged();
} else {
- op = simplified()->CheckedInt32ToTaggedSigned();
+ op = simplified()->CheckedInt32ToTaggedSigned(use_info.feedback());
}
} else {
return TypeError(node, output_rep, output_type,
@@ -292,7 +296,7 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
}
} else if (CanBeTaggedPointer(output_rep)) {
if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
- op = simplified()->CheckedTaggedToTaggedSigned();
+ op = simplified()->CheckedTaggedToTaggedSigned(use_info.feedback());
} else if (output_type->Is(Type::SignedSmall())) {
op = simplified()->ChangeTaggedToTaggedSigned();
} else {
@@ -304,7 +308,7 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
// TODO(turbofan): Consider adding a Bailout operator that just deopts.
// Also use that for MachineRepresentation::kPointer case above.
node = InsertChangeBitToTagged(node);
- op = simplified()->CheckedTaggedToTaggedSigned();
+ op = simplified()->CheckedTaggedToTaggedSigned(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedSigned);
@@ -334,7 +338,9 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
Operator const* op;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kTaggedPointer),
+ node);
} else if (output_rep == MachineRepresentation::kBit) {
if (output_type->Is(Type::Boolean())) {
op = simplified()->ChangeBitToTagged();
@@ -378,7 +384,7 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
}
// TODO(turbofan): Consider adding a Bailout operator that just deopts
// for TaggedSigned output representation.
- op = simplified()->CheckedTaggedToTaggedPointer();
+ op = simplified()->CheckedTaggedToTaggedPointer(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedPointer);
@@ -411,7 +417,8 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
const Operator* op;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kTagged), node);
} else if (output_rep == MachineRepresentation::kBit) {
if (output_type->Is(Type::Boolean())) {
op = simplified()->ChangeBitToTagged();
@@ -489,7 +496,8 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
const Operator* op = nullptr;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kFloat32), node);
} else if (IsWord(output_rep)) {
if (output_type->Is(Type::Signed32())) {
// int32 -> float64 -> float32
@@ -549,7 +557,8 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
const Operator* op = nullptr;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kFloat64), node);
} else if (IsWord(output_rep)) {
if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeInt32ToFloat64();
@@ -626,7 +635,8 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
const Operator* op = nullptr;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kWord32), node);
} else if (output_rep == MachineRepresentation::kBit) {
return node; // Sloppy comparison -> word32
} else if (output_rep == MachineRepresentation::kFloat64) {
@@ -637,7 +647,8 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
op = simplified()->CheckedFloat64ToInt32(
output_type->Maybe(Type::MinusZero())
? use_info.minus_zero_check()
- : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ : CheckForMinusZeroMode::kDontCheckForMinusZero,
+ use_info.feedback());
} else if (output_type->Is(Type::Unsigned32())) {
op = machine()->ChangeFloat64ToUint32();
} else if (use_info.truncation().IsUsedAsWord32()) {
@@ -655,7 +666,8 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
op = simplified()->CheckedFloat64ToInt32(
output_type->Maybe(Type::MinusZero())
? use_info.minus_zero_check()
- : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ : CheckForMinusZeroMode::kDontCheckForMinusZero,
+ use_info.feedback());
} else if (output_type->Is(Type::Unsigned32())) {
op = machine()->ChangeFloat64ToUint32();
} else if (use_info.truncation().IsUsedAsWord32()) {
@@ -671,12 +683,13 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
} else if (output_type->Is(Type::Signed32())) {
op = simplified()->ChangeTaggedToInt32();
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
- op = simplified()->CheckedTaggedSignedToInt32();
+ op = simplified()->CheckedTaggedSignedToInt32(use_info.feedback());
} else if (use_info.type_check() == TypeCheckKind::kSigned32) {
op = simplified()->CheckedTaggedToInt32(
output_type->Maybe(Type::MinusZero())
? use_info.minus_zero_check()
- : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ : CheckForMinusZeroMode::kDontCheckForMinusZero,
+ use_info.feedback());
} else if (output_type->Is(Type::Unsigned32())) {
op = simplified()->ChangeTaggedToUint32();
} else if (use_info.truncation().IsUsedAsWord32()) {
@@ -684,10 +697,10 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
op = simplified()->TruncateTaggedToWord32();
} else if (use_info.type_check() == TypeCheckKind::kNumber) {
op = simplified()->CheckedTruncateTaggedToWord32(
- CheckTaggedInputMode::kNumber);
+ CheckTaggedInputMode::kNumber, use_info.feedback());
} else if (use_info.type_check() == TypeCheckKind::kNumberOrOddball) {
op = simplified()->CheckedTruncateTaggedToWord32(
- CheckTaggedInputMode::kNumberOrOddball);
+ CheckTaggedInputMode::kNumberOrOddball, use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord32);
@@ -704,7 +717,7 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
if (output_type->Is(Type::Signed32())) {
return node;
} else if (output_type->Is(Type::Unsigned32())) {
- op = simplified()->CheckedUint32ToInt32();
+ op = simplified()->CheckedUint32ToInt32(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord32);
@@ -762,7 +775,8 @@ Node* RepresentationChanger::GetBitRepresentationFor(
const Operator* op;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kBit), node);
} else if (output_rep == MachineRepresentation::kTagged ||
output_rep == MachineRepresentation::kTaggedPointer) {
if (output_type->Is(Type::BooleanOrNullOrUndefined())) {
@@ -807,7 +821,8 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
Node* node, MachineRepresentation output_rep, Type* output_type) {
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kWord32), node);
} else if (output_rep == MachineRepresentation::kBit) {
return node; // Sloppy comparison -> word64
}
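
Editor's note: every hunk in representation-change.cc above follows the same two patterns: checked conversion operators (CheckedInt32ToTaggedSigned, CheckedFloat64ToInt32, and so on) now receive the use site's VectorSlotPair via use_info.feedback(), and impossible (Type::None) inputs are materialized as typed DeadValue nodes instead of the shared untyped jsgraph()->DeadValue(). The standalone C++ sketch below is illustrative only -- FeedbackSlot, DeoptInfo, and this CheckedFloat64ToInt32 are hypothetical stand-ins, not V8 API -- and shows the shape of threading a feedback handle through a checked conversion so a failed check can be attributed to the site that requested it.

    #include <cstdint>
    #include <iostream>
    #include <optional>

    // Hypothetical stand-in for V8's VectorSlotPair: identifies the feedback
    // slot of the use site that asked for the checked conversion.
    struct FeedbackSlot {
      int id = -1;
      bool IsValid() const { return id >= 0; }
    };

    // Minimal record of why and where a deoptimization was requested.
    struct DeoptInfo {
      const char* reason = nullptr;
      FeedbackSlot slot;
    };

    // A checked float64 -> int32 conversion that carries the feedback slot, so
    // a failure can disable speculation for exactly that site next time.
    // (Range and minus-zero handling are omitted for brevity.)
    std::optional<int32_t> CheckedFloat64ToInt32(double value, FeedbackSlot slot,
                                                 DeoptInfo* deopt) {
      int32_t truncated = static_cast<int32_t>(value);
      if (static_cast<double>(truncated) != value) {  // lost precision
        *deopt = {"not an int32", slot};
        return std::nullopt;
      }
      return truncated;
    }

    int main() {
      DeoptInfo deopt;
      FeedbackSlot site{7};
      if (!CheckedFloat64ToInt32(1.5, site, &deopt)) {
        std::cout << "deopt at slot " << deopt.slot.id << ": " << deopt.reason
                  << "\n";
      }
      return 0;
    }
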
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index 52a3e75c8a..b23a3dac5b 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -147,13 +147,18 @@ inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
// to the preferred representation. The preferred representation might be
// insufficient to do the conversion (e.g. word32->float64 conv), so we also
// need the signedness information to produce the correct value.
+// Additionally, use info may contain {CheckParameters} which contains
+// information for the deoptimizer such as a CallIC on which speculation
+// should be disallowed if the check fails.
class UseInfo {
public:
UseInfo(MachineRepresentation representation, Truncation truncation,
- TypeCheckKind type_check = TypeCheckKind::kNone)
+ TypeCheckKind type_check = TypeCheckKind::kNone,
+ const VectorSlotPair& feedback = VectorSlotPair())
: representation_(representation),
truncation_(truncation),
- type_check_(type_check) {}
+ type_check_(type_check),
+ feedback_(feedback) {}
static UseInfo TruncatingWord32() {
return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
}
@@ -187,14 +192,16 @@ class UseInfo {
return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(),
TypeCheckKind::kHeapObject);
}
- static UseInfo CheckedSignedSmallAsTaggedSigned() {
+ static UseInfo CheckedSignedSmallAsTaggedSigned(
+ const VectorSlotPair& feedback) {
return UseInfo(MachineRepresentation::kTaggedSigned, Truncation::Any(),
- TypeCheckKind::kSignedSmall);
+ TypeCheckKind::kSignedSmall, feedback);
}
- static UseInfo CheckedSignedSmallAsWord32(IdentifyZeros identify_zeros) {
+ static UseInfo CheckedSignedSmallAsWord32(IdentifyZeros identify_zeros,
+ const VectorSlotPair& feedback) {
return UseInfo(MachineRepresentation::kWord32,
- Truncation::Any(identify_zeros),
- TypeCheckKind::kSignedSmall);
+ Truncation::Any(identify_zeros), TypeCheckKind::kSignedSmall,
+ feedback);
}
static UseInfo CheckedSigned32AsWord32(IdentifyZeros identify_zeros) {
return UseInfo(MachineRepresentation::kWord32,
@@ -238,11 +245,13 @@ class UseInfo {
? CheckForMinusZeroMode::kDontCheckForMinusZero
: CheckForMinusZeroMode::kCheckForMinusZero;
}
+ const VectorSlotPair& feedback() const { return feedback_; }
private:
MachineRepresentation representation_;
Truncation truncation_;
TypeCheckKind type_check_;
+ VectorSlotPair feedback_;
};
// Contains logic related to changing the representation of values for constants
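
Editor's note: the header change above adds a VectorSlotPair member to UseInfo and a defaulted constructor argument, so existing call sites compile unchanged while the checked factories can forward feedback. A self-contained mock of that default-argument pattern (the types here are simplified stand-ins, not the real V8 declarations):

    #include <cassert>

    // Simplified stand-ins for the V8 types involved.
    enum class TypeCheckKind { kNone, kSignedSmall };

    struct VectorSlotPair {
      int slot = -1;  // -1 means "no feedback"
      bool IsValid() const { return slot >= 0; }
    };

    class UseInfo {
     public:
      // Defaulting the feedback keeps pre-existing callers source-compatible,
      // mirroring the constructor change in the diff above.
      explicit UseInfo(TypeCheckKind type_check = TypeCheckKind::kNone,
                       const VectorSlotPair& feedback = VectorSlotPair())
          : type_check_(type_check), feedback_(feedback) {}

      static UseInfo CheckedSignedSmall(const VectorSlotPair& feedback) {
        return UseInfo(TypeCheckKind::kSignedSmall, feedback);
      }

      TypeCheckKind type_check() const { return type_check_; }
      const VectorSlotPair& feedback() const { return feedback_; }

     private:
      TypeCheckKind type_check_;
      VectorSlotPair feedback_;
    };

    int main() {
      UseInfo plain(TypeCheckKind::kNone);                 // old-style, no feedback
      UseInfo checked = UseInfo::CheckedSignedSmall({3});  // new-style, with feedback
      assert(!plain.feedback().IsValid());
      assert(checked.feedback().IsValid());
      return 0;
    }
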
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index f49a8e540c..c0d3146be1 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -156,45 +156,6 @@ static inline bool HasStackSlotInput(Instruction* instr, size_t index) {
namespace {
-class OutOfLineLoadNAN32 final : public OutOfLineCode {
- public:
- OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
- kScratchReg);
- }
-
- private:
- DoubleRegister const result_;
-};
-
-class OutOfLineLoadNAN64 final : public OutOfLineCode {
- public:
- OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
- kScratchReg);
- }
-
- private:
- DoubleRegister const result_;
-};
-
-class OutOfLineLoadZero final : public OutOfLineCode {
- public:
- OutOfLineLoadZero(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ LoadImmP(result_, Operand::Zero()); }
-
- private:
- Register const result_;
-};
-
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
@@ -938,102 +899,6 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ asm_instr(value, operand); \
} while (0)
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, width) \
- do { \
- DoubleRegister result = i.OutputDoubleRegister(); \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ CmpLogical32(offset, i.InputRegister(2)); \
- } else { \
- __ CmpLogical32(offset, i.InputImmediate(2)); \
- } \
- auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
- __ bge(ool->entry()); \
- __ CleanUInt32(offset); \
- __ asm_instr(result, operand); \
- __ bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- Register result = i.OutputRegister(); \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ CmpLogical32(offset, i.InputRegister(2)); \
- } else { \
- __ CmpLogical32(offset, i.InputImmediate(2)); \
- } \
- auto ool = new (zone()) OutOfLineLoadZero(this, result); \
- __ bge(ool->entry()); \
- __ CleanUInt32(offset); \
- __ asm_instr(result, operand); \
- __ bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT32() \
- do { \
- Label done; \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ CmpLogical32(offset, i.InputRegister(2)); \
- } else { \
- __ CmpLogical32(offset, i.InputImmediate(2)); \
- } \
- __ bge(&done); \
- DoubleRegister value = i.InputDoubleRegister(3); \
- __ CleanUInt32(offset); \
- __ StoreFloat32(value, operand); \
- __ bind(&done); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_DOUBLE() \
- do { \
- Label done; \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- DCHECK_EQ(kMode_MRR, mode); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ CmpLogical32(offset, i.InputRegister(2)); \
- } else { \
- __ CmpLogical32(offset, i.InputImmediate(2)); \
- } \
- __ bge(&done); \
- DoubleRegister value = i.InputDoubleRegister(3); \
- __ CleanUInt32(offset); \
- __ StoreDouble(value, operand); \
- __ bind(&done); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- Label done; \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ CmpLogical32(offset, i.InputRegister(2)); \
- } else { \
- __ CmpLogical32(offset, i.InputImmediate(2)); \
- } \
- __ bge(&done); \
- Register value = i.InputRegister(3); \
- __ CleanUInt32(offset); \
- __ asm_instr(value, operand); \
- __ bind(&done); \
- } while (0)
-
void CodeGenerator::AssembleDeconstructFrame() {
__ LeaveFrame(StackFrame::MANUAL);
}
@@ -1219,8 +1084,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
if (instr->InputAt(0)->IsImmediate()) {
+#ifdef V8_TARGET_ARCH_S390X
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt64());
+#else
Address wasm_code = reinterpret_cast<Address>(
i.ToConstant(instr->InputAt(0)).ToInt32());
+#endif
__ Call(wasm_code, rmode);
} else {
__ Call(i.InputRegister(0));
@@ -1283,7 +1153,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ LoadP(kScratchReg,
FieldMemOperand(func, JSFunction::kContextOffset));
__ CmpP(cp, kScratchReg);
- __ Assert(eq, kWrongFunctionContext);
+ __ Assert(eq, AbortReason::kWrongFunctionContext);
}
__ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeOffset));
__ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -2107,7 +1977,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label done;
__ ConvertDoubleToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
kRoundToNearest);
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
__ lghi(i.OutputRegister(0), Operand::Zero());
__ bind(&done);
break;
@@ -2116,7 +1986,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label done;
__ ConvertDoubleToUnsignedInt32(i.OutputRegister(0),
i.InputDoubleRegister(0));
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
__ lghi(i.OutputRegister(0), Operand::Zero());
__ bind(&done);
break;
@@ -2127,7 +1997,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lghi(i.OutputRegister(1), Operand(1));
}
__ ConvertDoubleToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
if (i.OutputCount() > 1) {
__ lghi(i.OutputRegister(1), Operand::Zero());
} else {
@@ -2143,7 +2013,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
__ ConvertDoubleToUnsignedInt64(i.OutputRegister(0),
i.InputDoubleRegister(0));
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
if (i.OutputCount() > 1) {
__ lghi(i.OutputRegister(1), Operand::Zero());
} else {
@@ -2156,7 +2026,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label done;
__ ConvertFloat32ToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
kRoundToZero);
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
__ lghi(i.OutputRegister(0), Operand::Zero());
__ bind(&done);
break;
@@ -2165,7 +2035,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label done;
__ ConvertFloat32ToUnsignedInt32(i.OutputRegister(0),
i.InputDoubleRegister(0));
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
__ lghi(i.OutputRegister(0), Operand::Zero());
__ bind(&done);
break;
@@ -2177,7 +2047,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
__ ConvertFloat32ToUnsignedInt64(i.OutputRegister(0),
i.InputDoubleRegister(0));
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
if (i.OutputCount() > 1) {
__ lghi(i.OutputRegister(1), Operand::Zero());
} else {
@@ -2192,7 +2062,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lghi(i.OutputRegister(1), Operand(1));
}
__ ConvertFloat32ToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
if (i.OutputCount() > 1) {
__ lghi(i.OutputRegister(1), Operand::Zero());
} else {
@@ -2334,56 +2204,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_Lay:
__ lay(i.OutputRegister(), i.MemoryOperand());
break;
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadB);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadHalfWordP);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadLogicalHalfWordP);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlW);
- break;
- case kCheckedLoadWord64:
-#if V8_TARGET_ARCH_S390X
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadP);
-#else
- UNREACHABLE();
-#endif
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(LoadFloat32, 32);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(LoadDouble, 64);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(StoreByte);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(StoreHalfWord);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(StoreW);
- break;
- case kCheckedStoreWord64:
-#if V8_TARGET_ARCH_S390X
- ASSEMBLE_CHECKED_STORE_INTEGER(StoreP);
-#else
- UNREACHABLE();
-#endif
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT32();
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_DOUBLE();
- break;
case kAtomicLoadInt8:
__ LoadB(i.OutputRegister(), i.MemoryOperand());
break;
@@ -2629,7 +2449,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
}
}
@@ -2762,7 +2582,7 @@ void CodeGenerator::AssembleConstructFrame() {
frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index 54f5a0c68b..457c5a1d82 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -613,7 +613,8 @@ void VisitUnaryOp(InstructionSelector* selector, Node* node,
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -688,7 +689,8 @@ void VisitBinOp(InstructionSelector* selector, Node* node,
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -850,99 +852,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- S390OperandGenerator g(this);
- Node* const base = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
-#if V8_TARGET_ARCH_S390X
- case MachineRepresentation::kWord64:
- opcode = kCheckedLoadWord64;
- break;
-#endif
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
-#if !V8_TARGET_ARCH_S390X
- case MachineRepresentation::kWord64: // Fall through.
-#endif
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- AddressingMode addressingMode = kMode_MRR;
- Emit(opcode | AddressingModeField::encode(addressingMode),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
- g.UseOperand(length, OperandMode::kUint32Imm));
-}
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- S390OperandGenerator g(this);
- Node* const base = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
-#if V8_TARGET_ARCH_S390X
- case MachineRepresentation::kWord64:
- opcode = kCheckedStoreWord64;
- break;
-#endif
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
-#if !V8_TARGET_ARCH_S390X
- case MachineRepresentation::kWord64: // Fall through.
-#endif
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- AddressingMode addressingMode = kMode_MRR;
- Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(offset),
- g.UseOperand(length, OperandMode::kUint32Imm), g.UseRegister(value));
-}
-
#if 0
static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
int mask_width = base::bits::CountPopulation(value);
@@ -987,7 +896,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
if (m.left().IsWord64Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
- sh = (64 - sh) & 0x3f;
+ sh = (64 - sh) & 0x3F;
} else {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
@@ -1075,7 +984,7 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
- sh = (64 - sh) & 0x3f;
+ sh = (64 - sh) & 0x3F;
if (mb >= me) {
bool match = false;
ArchOpcode opcode;
@@ -1249,6 +1158,8 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
#endif
+void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
VisitWord32UnaryOp(this, node, kS390_Abs32, OperandMode::kNone);
}
@@ -1728,7 +1639,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1816,7 +1728,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
DCHECK(input_count <= 8 && output_count <= 1);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -1921,7 +1834,8 @@ void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -2183,14 +2097,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2351,7 +2265,7 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
int slot = kStackFrameExtraParamSlot;
for (PushParameter input : (*arguments)) {
- Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(slot));
++slot;
}
@@ -2361,19 +2275,20 @@ void InstructionSelector::EmitPrepareArguments(
int slot = 0;
for (PushParameter input : *arguments) {
- if (input.node() == nullptr) continue;
- num_slots +=
- input.type().representation() == MachineRepresentation::kFloat64
- ? kDoubleSize / kPointerSize
- : 1;
+ if (input.node == nullptr) continue;
+ num_slots += input.location.GetType().representation() ==
+ MachineRepresentation::kFloat64
+ ? kDoubleSize / kPointerSize
+ : 1;
}
Emit(kS390_StackClaim, g.NoOutput(), g.TempImmediate(num_slots));
for (PushParameter input : *arguments) {
// Skip any alignment holes in pushed nodes.
- if (input.node()) {
- Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ if (input.node) {
+ Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(slot));
- slot += input.type().representation() == MachineRepresentation::kFloat64
+ slot += input.location.GetType().representation() ==
+ MachineRepresentation::kFloat64
? (kDoubleSize / kPointerSize)
: 1;
}
@@ -2489,6 +2404,190 @@ void InstructionSelector::VisitAtomicOr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitAtomicXor(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Shl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ShrS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MaxS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MinS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MinU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MaxU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GeS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GtU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GeU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8MaxS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GeS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GtU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GeU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16MinS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GeS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GtU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GeU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Zero(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ // TODO(John): Port.
+}
+
+void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
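
Editor's note: besides forwarding cont->feedback() into EmitDeoptimize, the selector changes above switch PushParameter from accessors (input.node(), input.type()) to plain fields (input.node, input.location), and the stack-claim loop sizes each argument by its machine representation. The standalone sketch below mirrors that slot accounting; the sizes are assumptions for a 32-bit target where a float64 argument occupies two pointer-sized slots (on 64-bit s390x both sizes are 8, so the ratio is 1).

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Assumed sizes for a 32-bit target; on a 64-bit target kPointerSize is 8
    // and every argument claims a single slot.
    constexpr std::size_t kPointerSize = 4;
    constexpr std::size_t kDoubleSize = 8;

    enum class Rep { kWord32, kFloat64 };

    // Mirrors the num_slots computation in EmitPrepareArguments above:
    // float64 arguments claim kDoubleSize / kPointerSize slots, others one.
    std::size_t SlotsToClaim(const std::vector<Rep>& args) {
      std::size_t num_slots = 0;
      for (Rep rep : args) {
        num_slots += (rep == Rep::kFloat64) ? kDoubleSize / kPointerSize : 1;
      }
      return num_slots;
    }

    int main() {
      std::cout << SlotsToClaim({Rep::kWord32, Rep::kFloat64, Rep::kWord32})
                << "\n";  // prints 4 on the assumed 32-bit layout
      return 0;
    }
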
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 582fbd6424..423d757a4f 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -19,8 +19,8 @@ namespace {
static const int kNumLanes32 = 4;
static const int kNumLanes16 = 8;
static const int kNumLanes8 = 16;
-static const int32_t kMask16 = 0xffff;
-static const int32_t kMask8 = 0xff;
+static const int32_t kMask16 = 0xFFFF;
+static const int32_t kMask8 = 0xFF;
static const int32_t kShift16 = 16;
static const int32_t kShift8 = 24;
} // anonymous
@@ -595,7 +595,7 @@ void SimdScalarLowering::LowerConvertFromFloat(Node* node, bool is_signed) {
Node* min = graph()->NewNode(
common()->Float64Constant(static_cast<double>(is_signed ? kMinInt : 0)));
Node* max = graph()->NewNode(common()->Float64Constant(
- static_cast<double>(is_signed ? kMaxInt : 0xffffffffu)));
+ static_cast<double>(is_signed ? kMaxInt : 0xFFFFFFFFu)));
for (int i = 0; i < kNumLanes32; ++i) {
Node* double_rep =
graph()->NewNode(machine()->ChangeFloat32ToFloat64(), rep[i]);
@@ -913,7 +913,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
DCHECK_EQ(1, node->InputCount());
Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
Node* rep_node[kNumLanes32];
- Node* mask = graph()->NewNode(common()->Int32Constant(0xffffffff));
+ Node* mask = graph()->NewNode(common()->Int32Constant(0xFFFFFFFF));
for (int i = 0; i < kNumLanes32; ++i) {
rep_node[i] = graph()->NewNode(machine()->Word32Xor(), rep[i], mask);
}
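
Editor's note: the simd-scalar-lowering hunks only change hex-literal casing, but the LowerConvertFromFloat context they touch is worth spelling out: each float lane is widened to double and clamped to the signed or unsigned 32-bit range before truncation. A standalone sketch of that per-lane clamp, inferred from the constants visible above rather than the full lowering (NaN lanes would need separate handling):

    #include <cstdint>
    #include <iostream>
    #include <limits>

    // Clamp one SIMD lane using the same bounds the lowering above builds as
    // Float64Constants: [kMinInt, kMaxInt] when signed, [0, 0xFFFFFFFF] when
    // unsigned, then truncate toward zero.
    int64_t ConvertLaneFromFloat(float lane, bool is_signed) {
      double value = static_cast<double>(lane);
      double min = is_signed ? std::numeric_limits<int32_t>::min() : 0.0;
      double max = is_signed ? std::numeric_limits<int32_t>::max()
                             : static_cast<double>(0xFFFFFFFFu);
      if (value < min) value = min;
      if (value > max) value = max;
      return static_cast<int64_t>(value);
    }

    int main() {
      std::cout << ConvertLaneFromFloat(-1.5f, true) << "\n";   // -1
      std::cout << ConvertLaneFromFloat(-1.5f, false) << "\n";  //  0
      std::cout << ConvertLaneFromFloat(5e9f, true) << "\n";    //  2147483647
      return 0;
    }
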
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 9bdb7cfbaf..6e6c011fc1 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -92,7 +92,8 @@ UseInfo CheckedUseInfoAsWord32FromHint(
switch (hint) {
case NumberOperationHint::kSignedSmall:
case NumberOperationHint::kSignedSmallInputs:
- return UseInfo::CheckedSignedSmallAsWord32(identify_zeros);
+ return UseInfo::CheckedSignedSmallAsWord32(identify_zeros,
+ VectorSlotPair());
case NumberOperationHint::kSigned32:
return UseInfo::CheckedSigned32AsWord32(identify_zeros);
case NumberOperationHint::kNumber:
@@ -1345,17 +1346,6 @@ class RepresentationSelector {
void VisitSpeculativeAdditiveOp(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we can
- // only eliminate an unused speculative number operation if we know that
- // the inputs are PlainPrimitive, which excludes everything that's might
- // have side effects or throws during a ToNumber conversion. We are only
- // allowed to perform a number addition if neither input is a String, even
- // if the value is never used, so we further limit to NumberOrOddball in
- // order to explicitly exclude String inputs.
- if (BothInputsAre(node, Type::NumberOrOddball())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
-
if (BothInputsAre(node, type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
(GetUpperBound(node)->Is(Type::Signed32()) ||
GetUpperBound(node)->Is(Type::Unsigned32()) ||
@@ -1377,13 +1367,6 @@ class RepresentationSelector {
void VisitSpeculativeNumberModulus(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN()) &&
(truncation.IsUsedAsWord32() ||
NodeProperties::GetType(node)->Is(Type::Unsigned32()))) {
@@ -1514,8 +1497,20 @@ class RepresentationSelector {
return VisitLeaf(node, MachineRepresentation::kWord64);
case IrOpcode::kExternalConstant:
return VisitLeaf(node, MachineType::PointerRepresentation());
- case IrOpcode::kNumberConstant:
- return VisitLeaf(node, MachineRepresentation::kTagged);
+ case IrOpcode::kNumberConstant: {
+ double const value = OpParameter<double>(node);
+ int value_as_int;
+ if (DoubleToSmiInteger(value, &value_as_int)) {
+ VisitLeaf(node, MachineRepresentation::kTaggedSigned);
+ if (lower()) {
+ intptr_t smi = bit_cast<intptr_t>(Smi::FromInt(value_as_int));
+ DeferReplacement(node, lowering->jsgraph()->IntPtrConstant(smi));
+ }
+ return;
+ }
+ VisitLeaf(node, MachineRepresentation::kTagged);
+ return;
+ }
case IrOpcode::kHeapConstant:
return VisitLeaf(node, MachineRepresentation::kTaggedPointer);
case IrOpcode::kPointerConstant: {
@@ -1668,13 +1663,6 @@ class RepresentationSelector {
case IrOpcode::kSpeculativeNumberLessThan:
case IrOpcode::kSpeculativeNumberLessThanOrEqual:
case IrOpcode::kSpeculativeNumberEqual: {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
// Number comparisons reduce to integer comparisons for integer inputs.
if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
TypeOf(node->InputAt(1))->Is(Type::Unsigned32())) {
@@ -1707,8 +1695,10 @@ class RepresentationSelector {
Node* rhs = node->InputAt(1);
if (IsNodeRepresentationTagged(lhs) &&
IsNodeRepresentationTagged(rhs)) {
- VisitBinop(node, UseInfo::CheckedSignedSmallAsTaggedSigned(),
- MachineRepresentation::kBit);
+ VisitBinop(
+ node,
+ UseInfo::CheckedSignedSmallAsTaggedSigned(VectorSlotPair()),
+ MachineRepresentation::kBit);
ChangeToPureOp(
node, changer_->TaggedSignedOperatorFor(node->opcode()));
@@ -1755,13 +1745,6 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeNumberMultiply: {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
if (BothInputsAre(node, Type::Integral32()) &&
(NodeProperties::GetType(node)->Is(Type::Signed32()) ||
NodeProperties::GetType(node)->Is(Type::Unsigned32()) ||
@@ -1785,7 +1768,7 @@ class RepresentationSelector {
// Handle the case when no int32 checks on inputs are necessary
// (but an overflow check is needed on the output).
if (BothInputsAre(node, Type::Signed32())) {
- // If both the inputs the feedback are int32, use the overflow op.
+ // If both inputs and feedback are int32, use the overflow op.
if (hint == NumberOperationHint::kSignedSmall ||
hint == NumberOperationHint::kSigned32) {
VisitBinop(node, UseInfo::TruncatingWord32(),
@@ -1836,13 +1819,6 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeNumberDivide: {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
if (BothInputsAreUnsigned32(node) && truncation.IsUsedAsWord32()) {
// => unsigned Uint32Div
VisitWord32TruncatingBinop(node);
@@ -2014,13 +1990,6 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeNumberShiftLeft: {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
if (BothInputsAre(node, Type::NumberOrOddball())) {
Type* rhs_type = GetUpperBound(node->InputAt(1));
VisitBinop(node, UseInfo::TruncatingWord32(),
@@ -2050,13 +2019,6 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeNumberShiftRight: {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
if (BothInputsAre(node, Type::NumberOrOddball())) {
Type* rhs_type = GetUpperBound(node->InputAt(1));
VisitBinop(node, UseInfo::TruncatingWord32(),
@@ -2086,13 +2048,6 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeNumberShiftRightLogical: {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that
- // might have side effects or throw during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
NumberOperationHint hint = NumberOperationHintOf(node->op());
Type* rhs_type = GetUpperBound(node->InputAt(1));
if (rhs_type->Is(type_cache_.kZeroish) &&
@@ -2107,8 +2062,8 @@ class RepresentationSelector {
MachineRepresentation::kWord32, Type::Unsigned31());
if (lower()) {
node->RemoveInput(1);
- NodeProperties::ChangeOp(node,
- simplified()->CheckedUint32ToInt32());
+ NodeProperties::ChangeOp(
+ node, simplified()->CheckedUint32ToInt32(VectorSlotPair()));
}
return;
}
@@ -2315,6 +2270,11 @@ class RepresentationSelector {
if (lower()) DeferReplacement(node, node->InputAt(0));
return;
}
+ case IrOpcode::kNumberToString: {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ return;
+ }
case IrOpcode::kNumberToUint32: {
// Just change representation if necessary.
VisitUnop(node, UseInfo::TruncatingWord32(),
@@ -2365,6 +2325,13 @@ class RepresentationSelector {
return VisitUnop(node, UseInfo::AnyTagged(),
MachineRepresentation::kTaggedPointer);
}
+ case IrOpcode::kNewConsString: {
+ ProcessInput(node, 0, UseInfo::TaggedSigned()); // length
+ ProcessInput(node, 1, UseInfo::AnyTagged()); // first
+ ProcessInput(node, 2, UseInfo::AnyTagged()); // second
+ SetOutput(node, MachineRepresentation::kTaggedPointer);
+ return;
+ }
case IrOpcode::kStringEqual:
case IrOpcode::kStringLessThan:
case IrOpcode::kStringLessThanOrEqual: {
@@ -2391,6 +2358,12 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kStringCodePointAt: {
+ // TODO(turbofan): Allow builtins to return untagged values.
+ VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+ MachineRepresentation::kTaggedSigned);
+ return;
+ }
case IrOpcode::kStringFromCharCode: {
VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kTaggedPointer);
@@ -2408,6 +2381,14 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kTaggedSigned);
return;
}
+ case IrOpcode::kStringLength: {
+ // TODO(bmeurer): The input representation should be TaggedPointer.
+ // Fix this once we have a dedicated StringConcat/JSStringAdd
+ // operator, which marks its output as TaggedPointer properly.
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedSigned);
+ return;
+ }
case IrOpcode::kStringToLowerCaseIntl:
case IrOpcode::kStringToUpperCaseIntl: {
VisitUnop(node, UseInfo::AnyTagged(),
@@ -2479,13 +2460,17 @@ class RepresentationSelector {
return;
}
case IrOpcode::kCheckSmi: {
+ const CheckParameters& params = CheckParametersOf(node->op());
if (SmiValuesAre32Bits() && truncation.IsUsedAsWord32()) {
VisitUnop(node,
- UseInfo::CheckedSignedSmallAsWord32(kDistinguishZeros),
+ UseInfo::CheckedSignedSmallAsWord32(kDistinguishZeros,
+ params.feedback()),
MachineRepresentation::kWord32);
} else {
- VisitUnop(node, UseInfo::CheckedSignedSmallAsTaggedSigned(),
- MachineRepresentation::kTaggedSigned);
+ VisitUnop(
+ node,
+ UseInfo::CheckedSignedSmallAsTaggedSigned(params.feedback()),
+ MachineRepresentation::kTaggedSigned);
}
if (lower()) DeferReplacement(node, node->InputAt(0));
return;
@@ -2589,6 +2574,11 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kNumberIsFloat64Hole: {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
+ return;
+ }
case IrOpcode::kTransitionAndStoreElement: {
Type* value_type = TypeOf(node->InputAt(2));
@@ -2977,7 +2967,6 @@ class RepresentationSelector {
}
ProcessRemainingInputs(node, 1);
SetOutput(node, representation);
- if (lower()) DeferReplacement(node, node->InputAt(0));
return;
}
@@ -3702,7 +3691,7 @@ void SimplifiedLowering::DoShift(Node* node, Operator const* op,
if (!rhs_type->Is(type_cache_.kZeroToThirtyOne)) {
Node* const rhs = NodeProperties::GetValueInput(node, 1);
node->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rhs,
- jsgraph()->Int32Constant(0x1f)));
+ jsgraph()->Int32Constant(0x1F)));
}
ChangeToPureOp(node, op);
}
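
Editor's note: the most substantive simplified-lowering change above is the NumberConstant case: when DoubleToSmiInteger succeeds, the constant gets the kTaggedSigned representation and is replaced by an IntPtrConstant holding the bit pattern of the corresponding Smi. A standalone sketch of that encoding, assuming the 64-bit layout in which Smi values live in the upper 32 bits with a zero tag bit (the real layout depends on the SmiValuesAre32Bits configuration):

    #include <cstdint>
    #include <iostream>
    #include <limits>

    // Assumed 64-bit Smi layout: payload in the upper 32 bits, tag bit 0 == Smi.
    constexpr int kSmiShift = 32;

    intptr_t SmiBitPattern(int32_t value) {
      return static_cast<intptr_t>(value) << kSmiShift;
    }

    // Rough equivalent of DoubleToSmiInteger: succeeds only for doubles that
    // are exact int32 values. (The real helper additionally rejects -0 and
    // values outside the configured Smi range.)
    bool DoubleToSmiInteger(double value, int32_t* out) {
      if (value < std::numeric_limits<int32_t>::min() ||
          value > std::numeric_limits<int32_t>::max()) {
        return false;  // out of int32 range
      }
      int32_t as_int = static_cast<int32_t>(value);
      if (static_cast<double>(as_int) != value) return false;  // not an integer
      *out = as_int;
      return true;
    }

    int main() {
      int32_t v;
      if (DoubleToSmiInteger(42.0, &v)) {
        // This is the word the lowering would embed as an IntPtrConstant.
        std::cout << std::hex << SmiBitPattern(v) << "\n";  // 2a00000000
      }
      return 0;
    }
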
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index eaa148ee04..a78d885e6e 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -20,7 +20,7 @@ class RepresentationSelector;
class SourcePositionTable;
class TypeCache;
-class SimplifiedLowering final {
+class V8_EXPORT_PRIVATE SimplifiedLowering final {
public:
SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
SourcePositionTable* source_positions);
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 04bbc7bba8..9978bae122 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -149,9 +149,7 @@ CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator* op) {
CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kChangeFloat64ToTagged ||
- op->opcode() == IrOpcode::kCheckedInt32Mul ||
- op->opcode() == IrOpcode::kCheckedFloat64ToInt32 ||
- op->opcode() == IrOpcode::kCheckedTaggedToInt32);
+ op->opcode() == IrOpcode::kCheckedInt32Mul);
return OpParameter<CheckForMinusZeroMode>(op);
}
@@ -215,15 +213,20 @@ size_t hash_value(MapsParameterInfo const& p) { return hash_value(p.maps()); }
bool operator==(CheckMapsParameters const& lhs,
CheckMapsParameters const& rhs) {
- return lhs.flags() == rhs.flags() && lhs.maps() == rhs.maps();
+ return lhs.flags() == rhs.flags() && lhs.maps() == rhs.maps() &&
+ lhs.feedback() == rhs.feedback();
}
size_t hash_value(CheckMapsParameters const& p) {
- return base::hash_combine(p.flags(), p.maps());
+ return base::hash_combine(p.flags(), p.maps(), p.feedback());
}
std::ostream& operator<<(std::ostream& os, CheckMapsParameters const& p) {
- return os << p.flags() << p.maps_info();
+ os << p.flags() << p.maps_info();
+ if (p.feedback().IsValid()) {
+ os << "; " << p.feedback();
+ }
+ return os;
}
CheckMapsParameters const& CheckMapsParametersOf(Operator const* op) {
@@ -256,8 +259,7 @@ std::ostream& operator<<(std::ostream& os, CheckTaggedInputMode mode) {
}
CheckTaggedInputMode CheckTaggedInputModeOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kCheckedTaggedToFloat64 ||
- op->opcode() == IrOpcode::kCheckedTruncateTaggedToWord32);
+ DCHECK(op->opcode() == IrOpcode::kCheckedTaggedToFloat64);
return OpParameter<CheckTaggedInputMode>(op);
}
@@ -271,9 +273,28 @@ std::ostream& operator<<(std::ostream& os, GrowFastElementsMode mode) {
UNREACHABLE();
}
-GrowFastElementsMode GrowFastElementsModeOf(const Operator* op) {
+bool operator==(const GrowFastElementsParameters& lhs,
+ const GrowFastElementsParameters& rhs) {
+ return lhs.mode() == rhs.mode() && lhs.feedback() == rhs.feedback();
+}
+
+inline size_t hash_value(const GrowFastElementsParameters& params) {
+ return base::hash_combine(params.mode(), params.feedback());
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const GrowFastElementsParameters& params) {
+ os << params.mode();
+ if (params.feedback().IsValid()) {
+ os << params.feedback();
+ }
+ return os;
+}
+
+const GrowFastElementsParameters& GrowFastElementsParametersOf(
+ const Operator* op) {
DCHECK_EQ(IrOpcode::kMaybeGrowFastElements, op->opcode());
- return OpParameter<GrowFastElementsMode>(op);
+ return OpParameter<GrowFastElementsParameters>(op);
}
bool operator==(ElementsTransition const& lhs, ElementsTransition const& rhs) {
@@ -520,9 +541,9 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
return OpParameter<UnicodeEncoding>(op);
}
-BailoutReason BailoutReasonOf(const Operator* op) {
+AbortReason AbortReasonOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kRuntimeAbort, op->opcode());
- return OpParameter<BailoutReason>(op);
+ return static_cast<AbortReason>(OpParameter<int>(op));
}
DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
@@ -530,6 +551,54 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
return OpParameter<DeoptimizeReason>(op);
}
+const CheckTaggedInputParameters& CheckTaggedInputParametersOf(
+ const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kCheckedTruncateTaggedToWord32);
+ return OpParameter<CheckTaggedInputParameters>(op);
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const CheckTaggedInputParameters& params) {
+ os << params.mode();
+ if (params.feedback().IsValid()) {
+ os << "; " << params.feedback();
+ }
+ return os;
+}
+
+size_t hash_value(const CheckTaggedInputParameters& params) {
+ return base::hash_combine(params.mode(), params.feedback());
+}
+
+bool operator==(CheckTaggedInputParameters const& lhs,
+ CheckTaggedInputParameters const& rhs) {
+ return lhs.mode() == rhs.mode() && lhs.feedback() == rhs.feedback();
+}
+
+const CheckMinusZeroParameters& CheckMinusZeroParametersOf(const Operator* op) {
+ DCHECK(IrOpcode::kCheckedTaggedToInt32 == op->opcode() ||
+ IrOpcode::kCheckedFloat64ToInt32 == op->opcode());
+ return OpParameter<CheckMinusZeroParameters>(op);
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const CheckMinusZeroParameters& params) {
+ os << params.mode();
+ if (params.feedback().IsValid()) {
+ os << "; " << params.feedback();
+ }
+ return os;
+}
+
+size_t hash_value(const CheckMinusZeroParameters& params) {
+ return base::hash_combine(params.mode(), params.feedback());
+}
+
+bool operator==(CheckMinusZeroParameters const& lhs,
+ CheckMinusZeroParameters const& rhs) {
+ return lhs.mode() == rhs.mode() && lhs.feedback() == rhs.feedback();
+}
+
#define PURE_OP_LIST(V) \
V(BooleanNot, Operator::kNoProperties, 1, 0) \
V(NumberEqual, Operator::kCommutative, 2, 0) \
@@ -581,6 +650,7 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
V(NumberTrunc, Operator::kNoProperties, 1, 0) \
V(NumberToBoolean, Operator::kNoProperties, 1, 0) \
V(NumberToInt32, Operator::kNoProperties, 1, 0) \
+ V(NumberToString, Operator::kNoProperties, 1, 0) \
V(NumberToUint32, Operator::kNoProperties, 1, 0) \
V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
@@ -588,8 +658,11 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
V(StringCharAt, Operator::kNoProperties, 2, 1) \
V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
V(SeqStringCharCodeAt, Operator::kNoProperties, 2, 1) \
+ V(StringCodePointAt, Operator::kNoProperties, 2, 1) \
+ V(SeqStringCodePointAt, Operator::kNoProperties, 2, 1) \
V(StringFromCharCode, Operator::kNoProperties, 1, 0) \
V(StringIndexOf, Operator::kNoProperties, 3, 0) \
+ V(StringLength, Operator::kNoProperties, 1, 0) \
V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
V(StringToUpperCaseIntl, Operator::kNoProperties, 1, 0) \
V(TypeOf, Operator::kNoProperties, 1, 1) \
@@ -626,6 +699,7 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
V(ObjectIsString, Operator::kNoProperties, 1, 0) \
V(ObjectIsSymbol, Operator::kNoProperties, 1, 0) \
V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0) \
+ V(NumberIsFloat64Hole, Operator::kNoProperties, 1, 0) \
V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \
V(SameValue, Operator::kCommutative, 2, 0) \
V(ReferenceEqual, Operator::kCommutative, 2, 0) \
@@ -633,6 +707,7 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
V(StringLessThan, Operator::kNoProperties, 2, 0) \
V(StringLessThanOrEqual, Operator::kNoProperties, 2, 0) \
V(ToBoolean, Operator::kNoProperties, 1, 0) \
+ V(NewConsString, Operator::kNoProperties, 3, 0) \
V(MaskIndexWithBound, Operator::kNoProperties, 2, 0)
#define SPECULATIVE_NUMBER_BINOP_LIST(V) \
@@ -642,30 +717,32 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
V(SpeculativeNumberLessThanOrEqual)
#define CHECKED_OP_LIST(V) \
- V(CheckBounds, 2, 1) \
+ V(CheckEqualsInternalizedString, 2, 0) \
+ V(CheckEqualsSymbol, 2, 0) \
V(CheckHeapObject, 1, 1) \
V(CheckInternalizedString, 1, 1) \
- V(CheckNumber, 1, 1) \
+ V(CheckNotTaggedHole, 1, 1) \
V(CheckReceiver, 1, 1) \
- V(CheckSmi, 1, 1) \
- V(CheckString, 1, 1) \
V(CheckSeqString, 1, 1) \
V(CheckSymbol, 1, 1) \
- V(CheckNotTaggedHole, 1, 1) \
- V(CheckEqualsInternalizedString, 2, 0) \
- V(CheckEqualsSymbol, 2, 0) \
V(CheckedInt32Add, 2, 1) \
- V(CheckedInt32Sub, 2, 1) \
V(CheckedInt32Div, 2, 1) \
V(CheckedInt32Mod, 2, 1) \
+ V(CheckedInt32Sub, 2, 1) \
V(CheckedUint32Div, 2, 1) \
- V(CheckedUint32Mod, 2, 1) \
- V(CheckedUint32ToInt32, 1, 1) \
- V(CheckedUint32ToTaggedSigned, 1, 1) \
+ V(CheckedUint32Mod, 2, 1)
+
+#define CHECKED_WITH_FEEDBACK_OP_LIST(V) \
+ V(CheckBounds, 2, 1) \
+ V(CheckNumber, 1, 1) \
+ V(CheckSmi, 1, 1) \
+ V(CheckString, 1, 1) \
V(CheckedInt32ToTaggedSigned, 1, 1) \
V(CheckedTaggedSignedToInt32, 1, 1) \
+ V(CheckedTaggedToTaggedPointer, 1, 1) \
V(CheckedTaggedToTaggedSigned, 1, 1) \
- V(CheckedTaggedToTaggedPointer, 1, 1)
+ V(CheckedUint32ToInt32, 1, 1) \
+ V(CheckedUint32ToTaggedSigned, 1, 1)
struct SimplifiedOperatorGlobalCache final {
#define PURE(Name, properties, value_input_count, control_input_count) \
@@ -689,6 +766,18 @@ struct SimplifiedOperatorGlobalCache final {
CHECKED_OP_LIST(CHECKED)
#undef CHECKED
+#define CHECKED_WITH_FEEDBACK(Name, value_input_count, value_output_count) \
+ struct Name##Operator final : public Operator1<CheckParameters> { \
+ Name##Operator() \
+ : Operator1<CheckParameters>( \
+ IrOpcode::k##Name, Operator::kFoldable | Operator::kNoThrow, \
+ #Name, value_input_count, 1, 1, value_output_count, 1, 0, \
+ CheckParameters(VectorSlotPair())) {} \
+ }; \
+ Name##Operator k##Name;
+ CHECKED_WITH_FEEDBACK_OP_LIST(CHECKED_WITH_FEEDBACK)
+#undef CHECKED_WITH_FEEDBACK
+
template <DeoptimizeReason kDeoptimizeReason>
struct CheckIfOperator final : public Operator1<DeoptimizeReason> {
CheckIfOperator()
@@ -772,12 +861,13 @@ struct SimplifiedOperatorGlobalCache final {
template <CheckForMinusZeroMode kMode>
struct CheckedFloat64ToInt32Operator final
- : public Operator1<CheckForMinusZeroMode> {
+ : public Operator1<CheckMinusZeroParameters> {
CheckedFloat64ToInt32Operator()
- : Operator1<CheckForMinusZeroMode>(
+ : Operator1<CheckMinusZeroParameters>(
IrOpcode::kCheckedFloat64ToInt32,
Operator::kFoldable | Operator::kNoThrow, "CheckedFloat64ToInt32",
- 1, 1, 1, 1, 1, 0, kMode) {}
+ 1, 1, 1, 1, 1, 0,
+ CheckMinusZeroParameters(kMode, VectorSlotPair())) {}
};
CheckedFloat64ToInt32Operator<CheckForMinusZeroMode::kCheckForMinusZero>
kCheckedFloat64ToInt32CheckForMinusZeroOperator;
@@ -786,12 +876,13 @@ struct SimplifiedOperatorGlobalCache final {
template <CheckForMinusZeroMode kMode>
struct CheckedTaggedToInt32Operator final
- : public Operator1<CheckForMinusZeroMode> {
+ : public Operator1<CheckMinusZeroParameters> {
CheckedTaggedToInt32Operator()
- : Operator1<CheckForMinusZeroMode>(
+ : Operator1<CheckMinusZeroParameters>(
IrOpcode::kCheckedTaggedToInt32,
Operator::kFoldable | Operator::kNoThrow, "CheckedTaggedToInt32",
- 1, 1, 1, 1, 1, 0, kMode) {}
+ 1, 1, 1, 1, 1, 0,
+ CheckMinusZeroParameters(kMode, VectorSlotPair())) {}
};
CheckedTaggedToInt32Operator<CheckForMinusZeroMode::kCheckForMinusZero>
kCheckedTaggedToInt32CheckForMinusZeroOperator;
@@ -814,12 +905,13 @@ struct SimplifiedOperatorGlobalCache final {
template <CheckTaggedInputMode kMode>
struct CheckedTruncateTaggedToWord32Operator final
- : public Operator1<CheckTaggedInputMode> {
+ : public Operator1<CheckTaggedInputParameters> {
CheckedTruncateTaggedToWord32Operator()
- : Operator1<CheckTaggedInputMode>(
+ : Operator1<CheckTaggedInputParameters>(
IrOpcode::kCheckedTruncateTaggedToWord32,
Operator::kFoldable | Operator::kNoThrow,
- "CheckedTruncateTaggedToWord32", 1, 1, 1, 1, 1, 0, kMode) {}
+ "CheckedTruncateTaggedToWord32", 1, 1, 1, 1, 1, 0,
+ CheckTaggedInputParameters(kMode, VectorSlotPair())) {}
};
CheckedTruncateTaggedToWord32Operator<CheckTaggedInputMode::kNumber>
kCheckedTruncateTaggedToWord32NumberOperator;
@@ -867,6 +959,20 @@ struct SimplifiedOperatorGlobalCache final {
};
EnsureWritableFastElementsOperator kEnsureWritableFastElements;
+ template <GrowFastElementsMode kMode>
+ struct GrowFastElementsOperator final
+ : public Operator1<GrowFastElementsParameters> {
+ GrowFastElementsOperator()
+ : Operator1(IrOpcode::kMaybeGrowFastElements, Operator::kNoThrow,
+ "MaybeGrowFastElements", 4, 1, 1, 1, 1, 0,
+ GrowFastElementsParameters(kMode, VectorSlotPair())) {}
+ };
+
+ GrowFastElementsOperator<GrowFastElementsMode::kDoubleElements>
+ kGrowFastElementsOperatorDoubleElements;
+ GrowFastElementsOperator<GrowFastElementsMode::kSmiOrObjectElements>
+ kGrowFastElementsOperatorSmiOrObjectElements;
+
struct LoadFieldByIndexOperator final : public Operator {
LoadFieldByIndexOperator()
: Operator( // --
@@ -934,13 +1040,38 @@ GET_FROM_CACHE(FindOrderedHashMapEntryForInt32Key)
GET_FROM_CACHE(LoadFieldByIndex)
#undef GET_FROM_CACHE
-const Operator* SimplifiedOperatorBuilder::RuntimeAbort(BailoutReason reason) {
- return new (zone()) Operator1<BailoutReason>( // --
- IrOpcode::kRuntimeAbort, // opcode
- Operator::kNoThrow | Operator::kNoDeopt, // flags
- "RuntimeAbort", // name
- 0, 1, 1, 0, 1, 0, // counts
- reason); // parameter
+#define GET_FROM_CACHE_WITH_FEEDBACK(Name, value_input_count, \
+ value_output_count) \
+ const Operator* SimplifiedOperatorBuilder::Name( \
+ const VectorSlotPair& feedback) { \
+ if (!feedback.IsValid()) { \
+ return &cache_.k##Name; \
+ } \
+ return new (zone()) Operator1<CheckParameters>( \
+ IrOpcode::k##Name, Operator::kFoldable | Operator::kNoThrow, #Name, \
+ value_input_count, 1, 1, value_output_count, 1, 0, \
+ CheckParameters(feedback)); \
+ }
+CHECKED_WITH_FEEDBACK_OP_LIST(GET_FROM_CACHE_WITH_FEEDBACK)
+#undef GET_FROM_CACHE_WITH_FEEDBACK
+
+bool IsCheckedWithFeedback(const Operator* op) {
+#define CASE(Name, ...) case IrOpcode::k##Name:
+ switch (op->opcode()) {
+ CHECKED_WITH_FEEDBACK_OP_LIST(CASE) return true;
+ default:
+ return false;
+ }
+#undef CASE
+}
+
+const Operator* SimplifiedOperatorBuilder::RuntimeAbort(AbortReason reason) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kRuntimeAbort, // opcode
+ Operator::kNoThrow | Operator::kNoDeopt, // flags
+ "RuntimeAbort", // name
+ 0, 1, 1, 0, 1, 0, // counts
+ static_cast<int>(reason)); // parameter
}
const Operator* SimplifiedOperatorBuilder::CheckIf(DeoptimizeReason reason) {
@@ -977,25 +1108,35 @@ const Operator* SimplifiedOperatorBuilder::CheckedInt32Mul(
}
const Operator* SimplifiedOperatorBuilder::CheckedFloat64ToInt32(
- CheckForMinusZeroMode mode) {
- switch (mode) {
- case CheckForMinusZeroMode::kCheckForMinusZero:
- return &cache_.kCheckedFloat64ToInt32CheckForMinusZeroOperator;
- case CheckForMinusZeroMode::kDontCheckForMinusZero:
- return &cache_.kCheckedFloat64ToInt32DontCheckForMinusZeroOperator;
+ CheckForMinusZeroMode mode, const VectorSlotPair& feedback) {
+ if (!feedback.IsValid()) {
+ switch (mode) {
+ case CheckForMinusZeroMode::kCheckForMinusZero:
+ return &cache_.kCheckedFloat64ToInt32CheckForMinusZeroOperator;
+ case CheckForMinusZeroMode::kDontCheckForMinusZero:
+ return &cache_.kCheckedFloat64ToInt32DontCheckForMinusZeroOperator;
+ }
}
- UNREACHABLE();
+ return new (zone()) Operator1<CheckMinusZeroParameters>(
+ IrOpcode::kCheckedFloat64ToInt32,
+ Operator::kFoldable | Operator::kNoThrow, "CheckedFloat64ToInt32", 1, 1,
+ 1, 1, 1, 0, CheckMinusZeroParameters(mode, feedback));
}
const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt32(
- CheckForMinusZeroMode mode) {
- switch (mode) {
- case CheckForMinusZeroMode::kCheckForMinusZero:
- return &cache_.kCheckedTaggedToInt32CheckForMinusZeroOperator;
- case CheckForMinusZeroMode::kDontCheckForMinusZero:
- return &cache_.kCheckedTaggedToInt32DontCheckForMinusZeroOperator;
+ CheckForMinusZeroMode mode, const VectorSlotPair& feedback) {
+ if (!feedback.IsValid()) {
+ switch (mode) {
+ case CheckForMinusZeroMode::kCheckForMinusZero:
+ return &cache_.kCheckedTaggedToInt32CheckForMinusZeroOperator;
+ case CheckForMinusZeroMode::kDontCheckForMinusZero:
+ return &cache_.kCheckedTaggedToInt32DontCheckForMinusZeroOperator;
+ }
}
- UNREACHABLE();
+ return new (zone()) Operator1<CheckMinusZeroParameters>(
+ IrOpcode::kCheckedTaggedToInt32, Operator::kFoldable | Operator::kNoThrow,
+ "CheckedTaggedToInt32", 1, 1, 1, 1, 1, 0,
+ CheckMinusZeroParameters(mode, feedback));
}
const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
@@ -1010,19 +1151,25 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
}
const Operator* SimplifiedOperatorBuilder::CheckedTruncateTaggedToWord32(
- CheckTaggedInputMode mode) {
- switch (mode) {
- case CheckTaggedInputMode::kNumber:
- return &cache_.kCheckedTruncateTaggedToWord32NumberOperator;
- case CheckTaggedInputMode::kNumberOrOddball:
- return &cache_.kCheckedTruncateTaggedToWord32NumberOrOddballOperator;
+ CheckTaggedInputMode mode, const VectorSlotPair& feedback) {
+ if (!feedback.IsValid()) {
+ switch (mode) {
+ case CheckTaggedInputMode::kNumber:
+ return &cache_.kCheckedTruncateTaggedToWord32NumberOperator;
+ case CheckTaggedInputMode::kNumberOrOddball:
+ return &cache_.kCheckedTruncateTaggedToWord32NumberOrOddballOperator;
+ }
}
- UNREACHABLE();
+ return new (zone()) Operator1<CheckTaggedInputParameters>(
+ IrOpcode::kCheckedTruncateTaggedToWord32,
+ Operator::kFoldable | Operator::kNoThrow, "CheckedTruncateTaggedToWord32",
+ 1, 1, 1, 1, 1, 0, CheckTaggedInputParameters(mode, feedback));
}
-const Operator* SimplifiedOperatorBuilder::CheckMaps(CheckMapsFlags flags,
- ZoneHandleSet<Map> maps) {
- CheckMapsParameters const parameters(flags, maps);
+const Operator* SimplifiedOperatorBuilder::CheckMaps(
+ CheckMapsFlags flags, ZoneHandleSet<Map> maps,
+ const VectorSlotPair& feedback) {
+ CheckMapsParameters const parameters(flags, maps, feedback);
return new (zone()) Operator1<CheckMapsParameters>( // --
IrOpcode::kCheckMaps, // opcode
Operator::kNoThrow | Operator::kNoWrite, // flags
@@ -1096,13 +1243,21 @@ const Operator* SimplifiedOperatorBuilder::EnsureWritableFastElements() {
}
const Operator* SimplifiedOperatorBuilder::MaybeGrowFastElements(
- GrowFastElementsMode mode) {
- return new (zone()) Operator1<GrowFastElementsMode>( // --
- IrOpcode::kMaybeGrowFastElements, // opcode
- Operator::kNoThrow, // flags
- "MaybeGrowFastElements", // name
- 4, 1, 1, 1, 1, 0, // counts
- mode); // parameter
+ GrowFastElementsMode mode, const VectorSlotPair& feedback) {
+ if (!feedback.IsValid()) {
+ switch (mode) {
+ case GrowFastElementsMode::kDoubleElements:
+ return &cache_.kGrowFastElementsOperatorDoubleElements;
+ case GrowFastElementsMode::kSmiOrObjectElements:
+ return &cache_.kGrowFastElementsOperatorSmiOrObjectElements;
+ }
+ }
+ return new (zone()) Operator1<GrowFastElementsParameters>( // --
+ IrOpcode::kMaybeGrowFastElements, // opcode
+ Operator::kNoThrow, // flags
+ "MaybeGrowFastElements", // name
+ 4, 1, 1, 1, 1, 0, // counts
+ GrowFastElementsParameters(mode, feedback)); // parameter
}
const Operator* SimplifiedOperatorBuilder::TransitionElementsKind(
@@ -1160,6 +1315,23 @@ bool IsRestLengthOf(const Operator* op) {
return OpParameter<ArgumentsLengthParameters>(op).is_rest_length;
}
+bool operator==(CheckParameters const& lhs, CheckParameters const& rhs) {
+ return lhs.feedback() == rhs.feedback();
+}
+
+size_t hash_value(CheckParameters const& p) { return hash_value(p.feedback()); }
+
+std::ostream& operator<<(std::ostream& os, CheckParameters const& p) {
+ return os << p.feedback();
+}
+
+CheckParameters const& CheckParametersOf(Operator const* op) {
+#define MAKE_OR(name, arg2, arg3) op->opcode() == IrOpcode::k##name ||
+ CHECK((CHECKED_WITH_FEEDBACK_OP_LIST(MAKE_OR) false));
+#undef MAKE_OR
+ return OpParameter<CheckParameters>(op);
+}
+
const Operator* SimplifiedOperatorBuilder::NewDoubleElements(
PretenureFlag pretenure) {
return new (zone()) Operator1<PretenureFlag>( // --
@@ -1292,6 +1464,7 @@ const Operator* SimplifiedOperatorBuilder::TransitionAndStoreNonNumberElement(
#undef PURE_OP_LIST
#undef SPECULATIVE_NUMBER_BINOP_LIST
+#undef CHECKED_WITH_FEEDBACK_OP_LIST
#undef CHECKED_OP_LIST
#undef ACCESS_OP_LIST
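The GET_FROM_CACHE_WITH_FEEDBACK definitions above implement a cache-or-allocate policy for the feedback-carrying check operators: a call with an invalid VectorSlotPair returns the shared operator from the static cache, while valid feedback forces a zone-allocated Operator1<CheckParameters> that remembers the slot. Below is a minimal, self-contained sketch of that policy; Feedback, Op and Builder are illustrative stand-ins, not V8 types.

#include <iostream>
#include <memory>
#include <vector>

struct Feedback {
  int slot = -1;  // -1 models an invalid VectorSlotPair (no feedback)
  bool IsValid() const { return slot >= 0; }
};

struct Op {
  const char* name;
  Feedback feedback;
};

class Builder {
 public:
  // No feedback: hand out the shared cached operator.
  // Valid feedback: allocate a fresh operator that remembers the slot.
  const Op* CheckSmi(const Feedback& feedback) {
    if (!feedback.IsValid()) return &cached_check_smi_;
    owned_.push_back(std::make_unique<Op>(Op{"CheckSmi", feedback}));
    return owned_.back().get();
  }

 private:
  Op cached_check_smi_{"CheckSmi", Feedback{}};
  std::vector<std::unique_ptr<Op>> owned_;  // stand-in for the compiler Zone
};

int main() {
  Builder b;
  const Op* plain = b.CheckSmi(Feedback{});
  const Op* fed = b.CheckSmi(Feedback{3});
  std::cout << (plain == b.CheckSmi(Feedback{})) << "\n";  // 1: same cached op
  std::cout << fed->feedback.slot << "\n";                 // 3
}

The payoff of the split is that the common no-feedback case allocates nothing, while operators that do carry feedback get their own parameter object and are not merged with the cached one.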
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 0ed46b0e7a..10961cf452 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -16,6 +16,7 @@
#include "src/machine-type.h"
#include "src/objects.h"
#include "src/type-hints.h"
+#include "src/vector-slot-pair.h"
#include "src/zone/zone-handle-set.h"
namespace v8 {
@@ -91,6 +92,28 @@ ExternalArrayType ExternalArrayTypeOf(const Operator* op) WARN_UNUSED_RESULT;
// The ConvertReceiverMode is used as parameter by ConvertReceiver operators.
ConvertReceiverMode ConvertReceiverModeOf(Operator const* op);
+// The parameters for several Check nodes. The {feedback} parameter is
+// optional. If {feedback} references a valid CallIC slot and this check
+// fails, then speculation on that CallIC slot will be disabled.
+class CheckParameters final {
+ public:
+ explicit CheckParameters(const VectorSlotPair& feedback)
+ : feedback_(feedback) {}
+
+ VectorSlotPair const& feedback() const { return feedback_; }
+
+ private:
+ VectorSlotPair feedback_;
+};
+
+bool operator==(CheckParameters const&, CheckParameters const&);
+
+size_t hash_value(CheckParameters const&);
+
+std::ostream& operator<<(std::ostream&, CheckParameters const&);
+
+CheckParameters const& CheckParametersOf(Operator const*) WARN_UNUSED_RESULT;
+
enum class CheckFloat64HoleMode : uint8_t {
kNeverReturnHole, // Never return the hole (deoptimize instead).
kAllowReturnHole // Allow to return the hole (signaling NaN).
@@ -111,7 +134,32 @@ size_t hash_value(CheckTaggedInputMode);
std::ostream& operator<<(std::ostream&, CheckTaggedInputMode);
-CheckTaggedInputMode CheckTaggedInputModeOf(const Operator*) WARN_UNUSED_RESULT;
+CheckTaggedInputMode CheckTaggedInputModeOf(const Operator*);
+
+class CheckTaggedInputParameters {
+ public:
+ CheckTaggedInputParameters(CheckTaggedInputMode mode,
+ const VectorSlotPair& feedback)
+ : mode_(mode), feedback_(feedback) {}
+
+ CheckTaggedInputMode mode() const { return mode_; }
+ const VectorSlotPair& feedback() const { return feedback_; }
+
+ private:
+ CheckTaggedInputMode mode_;
+ VectorSlotPair feedback_;
+};
+
+const CheckTaggedInputParameters& CheckTaggedInputParametersOf(const Operator*)
+ WARN_UNUSED_RESULT;
+
+std::ostream& operator<<(std::ostream&,
+ const CheckTaggedInputParameters& params);
+
+size_t hash_value(const CheckTaggedInputParameters& params);
+
+bool operator==(CheckTaggedInputParameters const&,
+ CheckTaggedInputParameters const&);
enum class CheckForMinusZeroMode : uint8_t {
kCheckForMinusZero,
@@ -125,6 +173,30 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator*) WARN_UNUSED_RESULT;
+class CheckMinusZeroParameters {
+ public:
+ CheckMinusZeroParameters(CheckForMinusZeroMode mode,
+ const VectorSlotPair& feedback)
+ : mode_(mode), feedback_(feedback) {}
+
+ CheckForMinusZeroMode mode() const { return mode_; }
+ const VectorSlotPair& feedback() const { return feedback_; }
+
+ private:
+ CheckForMinusZeroMode mode_;
+ VectorSlotPair feedback_;
+};
+
+const CheckMinusZeroParameters& CheckMinusZeroParametersOf(const Operator* op)
+ WARN_UNUSED_RESULT;
+
+std::ostream& operator<<(std::ostream&, const CheckMinusZeroParameters& params);
+
+size_t hash_value(const CheckMinusZeroParameters& params);
+
+bool operator==(CheckMinusZeroParameters const&,
+ CheckMinusZeroParameters const&);
+
// Flags for map checks.
enum class CheckMapsFlag : uint8_t {
kNone = 0u,
@@ -155,19 +227,24 @@ bool operator!=(MapsParameterInfo const&, MapsParameterInfo const&);
size_t hash_value(MapsParameterInfo const&);
-// A descriptor for map checks.
+// A descriptor for map checks. The {feedback} parameter is optional.
+// If {feedback} references a valid CallIC slot and this MapCheck fails,
+// then speculation on that CallIC slot will be disabled.
class CheckMapsParameters final {
public:
- CheckMapsParameters(CheckMapsFlags flags, ZoneHandleSet<Map> const& maps)
- : flags_(flags), maps_info_(maps) {}
+ CheckMapsParameters(CheckMapsFlags flags, ZoneHandleSet<Map> const& maps,
+ const VectorSlotPair& feedback)
+ : flags_(flags), maps_info_(maps), feedback_(feedback) {}
CheckMapsFlags flags() const { return flags_; }
ZoneHandleSet<Map> const& maps() const { return maps_info_.maps(); }
MapsParameterInfo const& maps_info() const { return maps_info_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
private:
CheckMapsFlags const flags_;
MapsParameterInfo const maps_info_;
+ VectorSlotPair const feedback_;
};
bool operator==(CheckMapsParameters const&, CheckMapsParameters const&);
@@ -197,7 +274,29 @@ inline size_t hash_value(GrowFastElementsMode mode) {
std::ostream& operator<<(std::ostream&, GrowFastElementsMode);
-GrowFastElementsMode GrowFastElementsModeOf(const Operator*) WARN_UNUSED_RESULT;
+class GrowFastElementsParameters {
+ public:
+ GrowFastElementsParameters(GrowFastElementsMode mode,
+ const VectorSlotPair& feedback)
+ : mode_(mode), feedback_(feedback) {}
+
+ GrowFastElementsMode mode() const { return mode_; }
+ const VectorSlotPair& feedback() const { return feedback_; }
+
+ private:
+ GrowFastElementsMode mode_;
+ VectorSlotPair feedback_;
+};
+
+bool operator==(const GrowFastElementsParameters&,
+ const GrowFastElementsParameters&);
+
+inline size_t hash_value(const GrowFastElementsParameters&);
+
+std::ostream& operator<<(std::ostream&, const GrowFastElementsParameters&);
+
+const GrowFastElementsParameters& GrowFastElementsParametersOf(const Operator*)
+ WARN_UNUSED_RESULT;
// A descriptor for elements kind transitions.
class ElementsTransition final {
@@ -270,6 +369,8 @@ class AllocateParameters {
PretenureFlag pretenure_;
};
+bool IsCheckedWithFeedback(const Operator* op);
+
size_t hash_value(AllocateParameters);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AllocateParameters);
@@ -282,7 +383,7 @@ Type* AllocateTypeOf(const Operator* op) WARN_UNUSED_RESULT;
UnicodeEncoding UnicodeEncodingOf(const Operator*) WARN_UNUSED_RESULT;
-BailoutReason BailoutReasonOf(const Operator* op) WARN_UNUSED_RESULT;
+AbortReason AbortReasonOf(const Operator* op) WARN_UNUSED_RESULT;
DeoptimizeReason DeoptimizeReasonOf(const Operator* op) WARN_UNUSED_RESULT;
@@ -364,6 +465,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* NumberTrunc();
const Operator* NumberToBoolean();
const Operator* NumberToInt32();
+ const Operator* NumberToString();
const Operator* NumberToUint32();
const Operator* NumberToUint8Clamped();
@@ -402,9 +504,12 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* StringCharAt();
const Operator* StringCharCodeAt();
const Operator* SeqStringCharCodeAt();
+ const Operator* StringCodePointAt();
+ const Operator* SeqStringCodePointAt();
const Operator* StringFromCharCode();
const Operator* StringFromCodePoint(UnicodeEncoding encoding);
const Operator* StringIndexOf();
+ const Operator* StringLength();
const Operator* StringToLowerCaseIntl();
const Operator* StringToUpperCaseIntl();
@@ -435,49 +540,52 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* TruncateTaggedToBit();
const Operator* TruncateTaggedPointerToBit();
- const Operator* CheckIf(DeoptimizeReason deoptimize_reason);
- const Operator* CheckBounds();
- const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>);
const Operator* MaskIndexWithBound();
const Operator* CompareMaps(ZoneHandleSet<Map>);
const Operator* MapGuard(ZoneHandleSet<Map> maps);
+ const Operator* CheckBounds(const VectorSlotPair& feedback);
+ const Operator* CheckEqualsInternalizedString();
+ const Operator* CheckEqualsSymbol();
+ const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
const Operator* CheckHeapObject();
+ const Operator* CheckIf(DeoptimizeReason deoptimize_reason);
const Operator* CheckInternalizedString();
- const Operator* CheckNumber();
- const Operator* CheckSmi();
- const Operator* CheckString();
+ const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>,
+ const VectorSlotPair& = VectorSlotPair());
+ const Operator* CheckNotTaggedHole();
+ const Operator* CheckNumber(const VectorSlotPair& feedback);
+ const Operator* CheckReceiver();
const Operator* CheckSeqString();
+ const Operator* CheckSmi(const VectorSlotPair& feedback);
+ const Operator* CheckString(const VectorSlotPair& feedback);
const Operator* CheckSymbol();
- const Operator* CheckReceiver();
+ const Operator* CheckedFloat64ToInt32(CheckForMinusZeroMode,
+ const VectorSlotPair& feedback);
const Operator* CheckedInt32Add();
- const Operator* CheckedInt32Sub();
const Operator* CheckedInt32Div();
const Operator* CheckedInt32Mod();
- const Operator* CheckedUint32Div();
- const Operator* CheckedUint32Mod();
const Operator* CheckedInt32Mul(CheckForMinusZeroMode);
- const Operator* CheckedInt32ToTaggedSigned();
- const Operator* CheckedUint32ToInt32();
- const Operator* CheckedUint32ToTaggedSigned();
- const Operator* CheckedFloat64ToInt32(CheckForMinusZeroMode);
- const Operator* CheckedTaggedSignedToInt32();
- const Operator* CheckedTaggedToInt32(CheckForMinusZeroMode);
+ const Operator* CheckedInt32Sub();
+ const Operator* CheckedInt32ToTaggedSigned(const VectorSlotPair& feedback);
+ const Operator* CheckedTaggedSignedToInt32(const VectorSlotPair& feedback);
const Operator* CheckedTaggedToFloat64(CheckTaggedInputMode);
- const Operator* CheckedTaggedToTaggedSigned();
- const Operator* CheckedTaggedToTaggedPointer();
- const Operator* CheckedTruncateTaggedToWord32(CheckTaggedInputMode);
+ const Operator* CheckedTaggedToInt32(CheckForMinusZeroMode,
+ const VectorSlotPair& feedback);
+ const Operator* CheckedTaggedToTaggedPointer(const VectorSlotPair& feedback);
+ const Operator* CheckedTaggedToTaggedSigned(const VectorSlotPair& feedback);
+ const Operator* CheckedTruncateTaggedToWord32(CheckTaggedInputMode,
+ const VectorSlotPair& feedback);
+ const Operator* CheckedUint32Div();
+ const Operator* CheckedUint32Mod();
+ const Operator* CheckedUint32ToInt32(const VectorSlotPair& feedback);
+ const Operator* CheckedUint32ToTaggedSigned(const VectorSlotPair& feedback);
const Operator* ConvertReceiver(ConvertReceiverMode);
- const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
- const Operator* CheckNotTaggedHole();
const Operator* ConvertTaggedHoleToUndefined();
- const Operator* CheckEqualsInternalizedString();
- const Operator* CheckEqualsSymbol();
-
const Operator* ObjectIsArrayBufferView();
const Operator* ObjectIsBigInt();
const Operator* ObjectIsCallable();
@@ -493,6 +601,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* ObjectIsSymbol();
const Operator* ObjectIsUndetectable();
+ const Operator* NumberIsFloat64Hole();
+
const Operator* ArgumentsFrame();
const Operator* ArgumentsLength(int formal_parameter_count,
bool is_rest_length);
@@ -503,6 +613,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// new-arguments-elements arguments-frame, arguments-length
const Operator* NewArgumentsElements(int mapped_count);
+ // new-cons-string length, first, second
+ const Operator* NewConsString();
+
// array-buffer-was-neutered buffer
const Operator* ArrayBufferWasNeutered();
@@ -510,7 +623,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* EnsureWritableFastElements();
// maybe-grow-fast-elements object, elements, index, length
- const Operator* MaybeGrowFastElements(GrowFastElementsMode mode);
+ const Operator* MaybeGrowFastElements(GrowFastElementsMode mode,
+ const VectorSlotPair& feedback);
// transition-elements-kind object, from-map, to-map
const Operator* TransitionElementsKind(ElementsTransition transition);
@@ -549,7 +663,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* StoreTypedElement(ExternalArrayType const&);
// Abort (for terminating execution on internal error).
- const Operator* RuntimeAbort(BailoutReason reason);
+ const Operator* RuntimeAbort(AbortReason reason);
private:
Zone* zone() const { return zone_; }
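The parameter classes added to this header (CheckParameters, CheckTaggedInputParameters, CheckMinusZeroParameters, GrowFastElementsParameters) all share one shape: a mode enum, where one exists, bundled with a VectorSlotPair, plus operator==, hash_value and operator<< so that operators differing only in their feedback slot hash and compare as distinct parameters. A hedged sketch of that shape follows, using plain std::hash in place of base::hash_combine; Slot, Mode and MinusZeroParams are illustrative names, not the V8 API.

#include <cstddef>
#include <functional>
#include <iostream>

enum class Mode { kCheckForMinusZero, kDontCheckForMinusZero };

struct Slot {
  int index = -1;  // stand-in for VectorSlotPair; -1 means "no feedback"
  bool operator==(const Slot& other) const { return index == other.index; }
};

class MinusZeroParams {
 public:
  MinusZeroParams(Mode mode, Slot feedback) : mode_(mode), feedback_(feedback) {}
  Mode mode() const { return mode_; }
  const Slot& feedback() const { return feedback_; }

 private:
  Mode mode_;
  Slot feedback_;
};

bool operator==(const MinusZeroParams& a, const MinusZeroParams& b) {
  return a.mode() == b.mode() && a.feedback() == b.feedback();
}

std::size_t hash_value(const MinusZeroParams& p) {
  // Same idea as base::hash_combine(mode, feedback): mix both fields.
  std::size_t h = std::hash<int>()(static_cast<int>(p.mode()));
  return h * 31 + std::hash<int>()(p.feedback().index);
}

int main() {
  MinusZeroParams a(Mode::kCheckForMinusZero, Slot{2});
  MinusZeroParams b(Mode::kCheckForMinusZero, Slot{5});
  std::cout << (a == b) << "\n";                          // 0: feedback differs
  std::cout << (hash_value(a) == hash_value(b)) << "\n";  // almost always 0 too
}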
diff --git a/deps/v8/src/compiler/state-values-utils.cc b/deps/v8/src/compiler/state-values-utils.cc
index 30586f307c..26c47e0cb5 100644
--- a/deps/v8/src/compiler/state-values-utils.cc
+++ b/deps/v8/src/compiler/state-values-utils.cc
@@ -109,7 +109,7 @@ int StateValuesHashKey(Node** nodes, size_t count) {
for (size_t i = 0; i < count; i++) {
hash = hash * 23 + (nodes[i] == nullptr ? 0 : nodes[i]->id());
}
- return static_cast<int>(hash & 0x7fffffff);
+ return static_cast<int>(hash & 0x7FFFFFFF);
}
} // namespace
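The only change in this hunk is the hex-literal casing, but the surrounding function is worth a gloss: StateValuesHashKey is a small multiply-by-23 polynomial hash over node ids whose result is masked with 0x7FFFFFFF so it fits in a non-negative int. A rough stand-alone sketch, assuming the hash is seeded with the element count (the seed is not visible in the hunk):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Rough reimplementation of the shape visible in the hunk above: a
// multiply-by-23 polynomial hash over node ids, masked to a non-negative int.
int StateValuesHashKey(const std::vector<std::uint32_t>& node_ids) {
  std::size_t hash = node_ids.size();  // assumed seed; not shown in the hunk
  for (std::uint32_t id : node_ids) {
    hash = hash * 23 + id;  // nullptr entries contribute 0 in the original
  }
  return static_cast<int>(hash & 0x7FFFFFFF);  // clear the sign bit
}

int main() {
  std::cout << StateValuesHashKey({1, 7, 42}) << "\n";
}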
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index 1ed12d245b..672acb203d 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -326,13 +326,11 @@ UnobservablesSet RedundantStoreFinder::RecomputeSet(Node* node,
}
bool RedundantStoreFinder::CannotObserveStoreField(Node* node) {
- return node->opcode() == IrOpcode::kCheckedLoad ||
- node->opcode() == IrOpcode::kLoadElement ||
+ return node->opcode() == IrOpcode::kLoadElement ||
node->opcode() == IrOpcode::kLoad ||
node->opcode() == IrOpcode::kStore ||
node->opcode() == IrOpcode::kEffectPhi ||
node->opcode() == IrOpcode::kStoreElement ||
- node->opcode() == IrOpcode::kCheckedStore ||
node->opcode() == IrOpcode::kUnsafePointerAdd ||
node->opcode() == IrOpcode::kRetain;
}
diff --git a/deps/v8/src/compiler/type-cache.h b/deps/v8/src/compiler/type-cache.h
index 346aa47bfc..428688abde 100644
--- a/deps/v8/src/compiler/type-cache.h
+++ b/deps/v8/src/compiler/type-cache.h
@@ -44,6 +44,8 @@ class TypeCache final {
Type* const kSingletonOne = CreateRange(1.0, 1.0);
Type* const kSingletonTen = CreateRange(10.0, 10.0);
Type* const kSingletonMinusOne = CreateRange(-1.0, -1.0);
+ Type* const kZeroOrMinusZero =
+ Type::Union(kSingletonZero, Type::MinusZero(), zone());
Type* const kZeroOrUndefined =
Type::Union(kSingletonZero, Type::Undefined(), zone());
Type* const kTenOrUndefined =
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 605a96c944..12c9a194b8 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -254,6 +254,9 @@ class Typer::Visitor : public Reducer {
Type* TypeUnaryOp(Node* node, UnaryTyperFun);
Type* TypeBinaryOp(Node* node, BinaryTyperFun);
+ static Type* BinaryNumberOpTyper(Type* lhs, Type* rhs, Typer* t,
+ BinaryTyperFun f);
+
enum ComparisonOutcomeFlags {
kComparisonTrue = 1,
kComparisonFalse = 2,
@@ -399,7 +402,6 @@ Type* Typer::Visitor::TypeUnaryOp(Node* node, UnaryTyperFun f) {
return input->IsNone() ? Type::None() : f(input, typer_);
}
-
Type* Typer::Visitor::TypeBinaryOp(Node* node, BinaryTyperFun f) {
Type* left = Operand(node, 0);
Type* right = Operand(node, 1);
@@ -407,6 +409,23 @@ Type* Typer::Visitor::TypeBinaryOp(Node* node, BinaryTyperFun f) {
: f(left, right, typer_);
}
+Type* Typer::Visitor::BinaryNumberOpTyper(Type* lhs, Type* rhs, Typer* t,
+ BinaryTyperFun f) {
+ lhs = ToNumeric(lhs, t);
+ rhs = ToNumeric(rhs, t);
+ bool lhs_is_number = lhs->Is(Type::Number());
+ bool rhs_is_number = rhs->Is(Type::Number());
+ if (lhs_is_number && rhs_is_number) {
+ return f(lhs, rhs, t);
+ }
+ if (lhs_is_number || rhs_is_number) {
+ return Type::Number();
+ }
+ if (lhs->Is(Type::BigInt()) || rhs->Is(Type::BigInt())) {
+ return Type::BigInt();
+ }
+ return Type::Numeric();
+}
Typer::Visitor::ComparisonOutcome Typer::Visitor::Invert(
ComparisonOutcome outcome, Typer* t) {
@@ -417,7 +436,6 @@ Typer::Visitor::ComparisonOutcome Typer::Visitor::Invert(
return result;
}
-
Type* Typer::Visitor::FalsifyUndefined(ComparisonOutcome outcome, Typer* t) {
if ((outcome & kComparisonFalse) != 0 ||
(outcome & kComparisonUndefined) != 0) {
@@ -947,7 +965,7 @@ Type* Typer::Visitor::TypeDead(Node* node) { return Type::None(); }
Type* Typer::Visitor::TypeDeadValue(Node* node) { return Type::None(); }
-Type* Typer::Visitor::TypeUnreachable(Node* node) { UNREACHABLE(); }
+Type* Typer::Visitor::TypeUnreachable(Node* node) { return Type::None(); }
// JS comparison operators.
@@ -1052,53 +1070,23 @@ Type* Typer::Visitor::JSGreaterThanOrEqualTyper(
Type* Typer::Visitor::JSBitwiseOrTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberBitwiseOr(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberBitwiseOr);
}
-
Type* Typer::Visitor::JSBitwiseAndTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberBitwiseAnd(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberBitwiseAnd);
}
-
Type* Typer::Visitor::JSBitwiseXorTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberBitwiseXor(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberBitwiseXor);
}
-
Type* Typer::Visitor::JSShiftLeftTyper(Type* lhs, Type* rhs, Typer* t) {
- return NumberShiftLeft(ToNumber(lhs, t), ToNumber(rhs, t), t);
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberShiftLeft(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberShiftLeft);
}
-
Type* Typer::Visitor::JSShiftRightTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberShiftRight(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberShiftRight);
}
@@ -1120,51 +1108,27 @@ Type* Typer::Visitor::JSAddTyper(Type* lhs, Type* rhs, Typer* t) {
}
}
// The addition must be numeric.
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberAdd(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberAdd);
}
Type* Typer::Visitor::JSSubtractTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberSubtract(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberSubtract);
}
Type* Typer::Visitor::JSMultiplyTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberMultiply(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberMultiply);
}
Type* Typer::Visitor::JSDivideTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberDivide(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberDivide);
}
Type* Typer::Visitor::JSModulusTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberModulus(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberModulus);
}
Type* Typer::Visitor::JSExponentiateTyper(Type* lhs, Type* rhs, Typer* t) {
+ // TODO(neis): Refine using BinaryNumberOpTyper?
return Type::Numeric();
}
@@ -1556,7 +1520,17 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
case kDateGetTime:
return t->cache_.kJSDateValueType;
+ // Symbol functions.
+ case kSymbolConstructor:
+ return Type::Symbol();
+
+ // BigInt functions.
+ case kBigIntConstructor:
+ return Type::BigInt();
+
// Number functions.
+ case kNumberConstructor:
+ return Type::Number();
case kNumberIsFinite:
case kNumberIsInteger:
case kNumberIsNaN:
@@ -1570,6 +1544,8 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
return Type::String();
// String functions.
+ case kStringConstructor:
+ return Type::String();
case kStringCharCodeAt:
return Type::Union(Type::Range(0, kMaxUInt16, t->zone()), Type::NaN(),
t->zone());
@@ -1850,6 +1826,10 @@ Type* Typer::Visitor::TypeJSGeneratorRestoreRegister(Node* node) {
return Type::Any();
}
+Type* Typer::Visitor::TypeJSGeneratorRestoreInputOrDebugPos(Node* node) {
+ return Type::Any();
+}
+
Type* Typer::Visitor::TypeJSStackCheck(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeJSDebugger(Node* node) { return Type::Any(); }
@@ -1968,6 +1948,14 @@ Type* Typer::Visitor::TypeSeqStringCharCodeAt(Node* node) {
return typer_->cache_.kUint16;
}
+Type* Typer::Visitor::TypeStringCodePointAt(Node* node) {
+ return Type::Range(0.0, String::kMaxCodePoint, zone());
+}
+
+Type* Typer::Visitor::TypeSeqStringCodePointAt(Node* node) {
+ return Type::Range(0.0, String::kMaxCodePoint, zone());
+}
+
Type* Typer::Visitor::TypeStringFromCharCode(Node* node) {
return TypeUnaryOp(node, StringFromCharCodeTyper);
}
@@ -1976,7 +1964,13 @@ Type* Typer::Visitor::TypeStringFromCodePoint(Node* node) {
return TypeUnaryOp(node, StringFromCodePointTyper);
}
-Type* Typer::Visitor::TypeStringIndexOf(Node* node) { UNREACHABLE(); }
+Type* Typer::Visitor::TypeStringIndexOf(Node* node) {
+ return Type::Range(-1.0, String::kMaxLength, zone());
+}
+
+Type* Typer::Visitor::TypeStringLength(Node* node) {
+ return typer_->cache_.kStringLengthType;
+}
Type* Typer::Visitor::TypeMaskIndexWithBound(Node* node) {
return Type::Union(Operand(node, 0), typer_->cache_.kSingletonZero, zone());
@@ -2151,6 +2145,10 @@ Type* Typer::Visitor::TypeObjectIsMinusZero(Node* node) {
return TypeUnaryOp(node, ObjectIsMinusZero);
}
+Type* Typer::Visitor::TypeNumberIsFloat64Hole(Node* node) {
+ return Type::Boolean();
+}
+
Type* Typer::Visitor::TypeObjectIsNaN(Node* node) {
return TypeUnaryOp(node, ObjectIsNaN);
}
@@ -2205,6 +2203,10 @@ Type* Typer::Visitor::TypeNewArgumentsElements(Node* node) {
return Type::OtherInternal();
}
+Type* Typer::Visitor::TypeNewConsString(Node* node) {
+ return Type::OtherNonSeqString();
+}
+
Type* Typer::Visitor::TypeArrayBufferWasNeutered(Node* node) {
return Type::Boolean();
}
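BinaryNumberOpTyper, introduced earlier in this file, centralizes the dispatch that the individual JS*Typer functions used to repeat: after ToNumeric on both operands, both-Number runs the precise number typer, exactly-one-Number can only produce Number (mixing Number and BigInt throws), a definite BigInt on either side yields BigInt, and anything else stays Numeric. A simplified sketch of that dispatch over a three-value stand-in lattice; Ty and the lambda are illustrative, not the compiler's Type system.

#include <iostream>

// Simplified sketch of the dispatch in BinaryNumberOpTyper. The three values
// stand in for compiler types after ToNumeric(); the callable models the
// precise number typer (NumberAdd, NumberBitwiseOr, ...).
enum class Ty { kNumber, kBigInt, kNumeric };  // kNumeric = Number | BigInt

template <typename NumberTyper>
Ty BinaryNumberOpType(Ty lhs, Ty rhs, NumberTyper number_typer) {
  bool lhs_is_number = (lhs == Ty::kNumber);
  bool rhs_is_number = (rhs == Ty::kNumber);
  if (lhs_is_number && rhs_is_number) return number_typer(lhs, rhs);
  // Mixing Number with BigInt throws, so the only non-throwing result of a
  // half-Number operation is a Number.
  if (lhs_is_number || rhs_is_number) return Ty::kNumber;
  // A definite BigInt on either side means a successful operation is BigInt.
  if (lhs == Ty::kBigInt || rhs == Ty::kBigInt) return Ty::kBigInt;
  return Ty::kNumeric;
}

int main() {
  auto add = [](Ty, Ty) { return Ty::kNumber; };  // NumberAdd yields Number
  std::cout << (BinaryNumberOpType(Ty::kNumber, Ty::kNumber, add) == Ty::kNumber)
            << (BinaryNumberOpType(Ty::kBigInt, Ty::kNumeric, add) == Ty::kBigInt)
            << (BinaryNumberOpType(Ty::kNumeric, Ty::kNumeric, add) == Ty::kNumeric)
            << "\n";  // prints 111
}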
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 1b6ca6b53f..a3e90d579a 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -309,6 +309,8 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case PROTOTYPE_INFO_TYPE:
case TUPLE2_TYPE:
case TUPLE3_TYPE:
+ case LOAD_HANDLER_TYPE:
+ case STORE_HANDLER_TYPE:
case CONTEXT_EXTENSION_TYPE:
case ASYNC_GENERATOR_REQUEST_TYPE:
case CODE_DATA_CONTAINER_TYPE:
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index d791ec25c5..c4c371dab3 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -170,12 +170,11 @@ namespace compiler {
V(NumberOrHole, kNumber | kHole) \
V(NumberOrOddball, kNumber | kNullOrUndefined | kBoolean | \
kHole) \
- V(NumberOrString, kNumber | kString) \
V(NumericOrString, kNumeric | kString) \
V(NumberOrUndefined, kNumber | kUndefined) \
V(NumberOrUndefinedOrNullOrBoolean, \
kNumber | kNullOrUndefined | kBoolean) \
- V(PlainPrimitive, kNumberOrString | kBoolean | \
+ V(PlainPrimitive, kNumber | kString | kBoolean | \
kNullOrUndefined) \
V(Primitive, kSymbol | kBigInt | kPlainPrimitive) \
V(OtherUndetectableOrUndefined, kOtherUndetectable | kUndefined) \
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index e0c40df63b..a66a73f5d3 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -51,7 +51,7 @@ class Verifier::Visitor {
std::ostringstream str;
str << "TypeError: node #" << node->id() << ":" << *node->op()
<< " should never have a type";
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
void CheckTypeIs(Node* node, Type* type) {
@@ -62,7 +62,7 @@ class Verifier::Visitor {
NodeProperties::GetType(node)->PrintTo(str);
str << " is not ";
type->PrintTo(str);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
void CheckTypeMaybe(Node* node, Type* type) {
@@ -73,7 +73,7 @@ class Verifier::Visitor {
NodeProperties::GetType(node)->PrintTo(str);
str << " must intersect ";
type->PrintTo(str);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
void CheckValueInputIs(Node* node, int i, Type* type) {
@@ -86,7 +86,7 @@ class Verifier::Visitor {
NodeProperties::GetType(input)->PrintTo(str);
str << " is not ";
type->PrintTo(str);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
void CheckOutput(Node* node, Node* use, int count, const char* kind) {
@@ -95,7 +95,7 @@ class Verifier::Visitor {
str << "GraphError: node #" << node->id() << ":" << *node->op()
<< " does not produce " << kind << " output used by node #"
<< use->id() << ":" << *use->op();
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
};
@@ -236,10 +236,19 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// Dead is never connected to the graph.
UNREACHABLE();
case IrOpcode::kDeadValue:
+ CheckValueInputIs(node, 0, Type::None());
CheckTypeIs(node, Type::None());
break;
case IrOpcode::kUnreachable:
- CheckNotTyped(node);
+ CheckTypeIs(node, Type::None());
+ for (Edge edge : node->use_edges()) {
+ Node* use = edge.from();
+ if (NodeProperties::IsValueEdge(edge) && all.IsLive(use)) {
+ // {Unreachable} nodes can only be used by {DeadValue}, because they
+ // don't actually produce a value.
+ CHECK_EQ(IrOpcode::kDeadValue, use->opcode());
+ }
+ }
break;
case IrOpcode::kBranch: {
// Branch uses are IfTrue and IfFalse.
@@ -826,6 +835,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::Any());
break;
+ case IrOpcode::kJSGeneratorRestoreInputOrDebugPos:
+ CheckTypeIs(node, Type::Any());
+ break;
+
case IrOpcode::kJSStackCheck:
case IrOpcode::kJSDebugger:
// Type is empty.
@@ -984,6 +997,11 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::Number());
CheckTypeIs(node, Type::Signed32());
break;
+ case IrOpcode::kNumberToString:
+ // Number -> String
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckTypeIs(node, Type::String());
+ break;
case IrOpcode::kNumberToUint32:
case IrOpcode::kNumberToUint8Clamped:
// Number -> Unsigned32
@@ -1041,6 +1059,18 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 1, Type::Unsigned32());
CheckTypeIs(node, Type::UnsignedSmall());
break;
+ case IrOpcode::kStringCodePointAt:
+ // (String, Unsigned32) -> UnsignedSmall
+ CheckValueInputIs(node, 0, Type::String());
+ CheckValueInputIs(node, 1, Type::Unsigned32());
+ CheckTypeIs(node, Type::UnsignedSmall());
+ break;
+ case IrOpcode::kSeqStringCodePointAt:
+ // (String, Unsigned32) -> UnsignedSmall
+ CheckValueInputIs(node, 0, Type::String());
+ CheckValueInputIs(node, 1, Type::Unsigned32());
+ CheckTypeIs(node, Type::UnsignedSmall());
+ break;
case IrOpcode::kStringFromCharCode:
// Number -> String
CheckValueInputIs(node, 0, Type::Number());
@@ -1058,6 +1088,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 2, Type::SignedSmall());
CheckTypeIs(node, Type::SignedSmall());
break;
+ case IrOpcode::kStringLength:
+ CheckValueInputIs(node, 0, Type::String());
+ CheckTypeIs(node, TypeCache::Get().kStringLengthType);
+ break;
case IrOpcode::kStringToLowerCaseIntl:
case IrOpcode::kStringToUpperCaseIntl:
CheckValueInputIs(node, 0, Type::String());
@@ -1094,6 +1128,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Boolean());
break;
+ case IrOpcode::kNumberIsFloat64Hole:
+ CheckValueInputIs(node, 0, Type::NumberOrHole());
+ CheckTypeIs(node, Type::Boolean());
+ break;
case IrOpcode::kFindOrderedHashMapEntry:
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::SignedSmall());
@@ -1122,6 +1160,12 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
Code::kMaxArguments, zone));
CheckTypeIs(node, Type::OtherInternal());
break;
+ case IrOpcode::kNewConsString:
+ CheckValueInputIs(node, 0, TypeCache::Get().kStringLengthType);
+ CheckValueInputIs(node, 1, Type::String());
+ CheckValueInputIs(node, 2, Type::String());
+ CheckTypeIs(node, Type::OtherString());
+ break;
case IrOpcode::kAllocate:
CheckValueInputIs(node, 0, Type::PlainNumber());
break;
@@ -1591,8 +1635,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kLoadParentFramePointer:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
- case IrOpcode::kCheckedLoad:
- case IrOpcode::kCheckedStore:
case IrOpcode::kAtomicLoad:
case IrOpcode::kAtomicStore:
case IrOpcode::kAtomicExchange:
@@ -1602,6 +1644,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kAtomicAnd:
case IrOpcode::kAtomicOr:
case IrOpcode::kAtomicXor:
+ case IrOpcode::kSpeculationFence:
#define SIMD_MACHINE_OP_CASE(Name) case IrOpcode::k##Name:
MACHINE_SIMD_OP_LIST(SIMD_MACHINE_OP_CASE)
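The repeated FATAL(str.str().c_str()) to FATAL("%s", str.str().c_str()) edits in verifier.cc are a format-string hardening: FATAL forwards to a printf-style sink, so a dynamically built message containing '%' would otherwise be parsed as conversion specifiers. A toy illustration with a stand-in Fatal; none of this is the V8 FATAL implementation.

#include <cstdarg>
#include <cstdio>
#include <string>

// Tiny printf-style stand-in, only to show why the verifier now routes its
// message through "%s" instead of using it as the format string itself.
void Fatal(const char* format, ...) {
  va_list args;
  va_start(args, format);
  std::vfprintf(stderr, format, args);
  va_end(args);
  std::fputc('\n', stderr);
}

int main() {
  std::string msg = "node type is not Range(0, 100%)";  // '%' can occur in messages
  // Fatal(msg.c_str());      // unsafe: "%)" would be parsed as a conversion
  Fatal("%s", msg.c_str());   // safe: the message is passed purely as data
}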
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index a04c7b3e5d..9bbf5f3a3f 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -34,6 +34,8 @@
#include "src/log-inl.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/memory-tracing.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -68,6 +70,13 @@ void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
}
}
+bool ContainsSimd(wasm::FunctionSig* sig) {
+ for (wasm::ValueType t : sig->all()) {
+ if (t == wasm::kWasmS128) return true;
+ }
+ return false;
+}
+
} // namespace
WasmGraphBuilder::WasmGraphBuilder(
@@ -79,21 +88,15 @@ WasmGraphBuilder::WasmGraphBuilder(
jsgraph_(jsgraph),
centry_stub_node_(jsgraph_->HeapConstant(centry_stub)),
env_(env),
- signature_tables_(zone),
function_tables_(zone),
- function_table_sizes_(zone),
cur_buffer_(def_buffer_),
cur_bufsize_(kDefaultBufferSize),
+ has_simd_(ContainsSimd(sig)),
untrusted_code_mitigations_(FLAG_untrusted_code_mitigations),
runtime_exception_support_(exception_support),
sig_(sig),
source_position_table_(source_position_table) {
- for (size_t i = sig->parameter_count(); i > 0 && !has_simd_; --i) {
- if (sig->GetParam(i - 1) == wasm::kWasmS128) has_simd_ = true;
- }
- for (size_t i = sig->return_count(); i > 0 && !has_simd_; --i) {
- if (sig->GetReturn(i - 1) == wasm::kWasmS128) has_simd_ = true;
- }
+ DCHECK_IMPLIES(use_trap_handler(), trap_handler::IsTrapHandlerEnabled());
DCHECK_NOT_NULL(jsgraph_);
}
@@ -561,9 +564,15 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
op = m->Float64Sqrt();
break;
case wasm::kExprI32SConvertF64:
- return BuildI32SConvertF64(input, position);
+ return BuildI32SConvertF64(input, position, NumericImplementation::kTrap);
+ case wasm::kExprI32SConvertSatF64:
+ return BuildI32SConvertF64(input, position,
+ NumericImplementation::kSaturate);
case wasm::kExprI32UConvertF64:
- return BuildI32UConvertF64(input, position);
+ return BuildI32UConvertF64(input, position, NumericImplementation::kTrap);
+ case wasm::kExprI32UConvertSatF64:
+ return BuildI32UConvertF64(input, position,
+ NumericImplementation::kSaturate);
case wasm::kExprI32AsmjsSConvertF64:
return BuildI32AsmjsSConvertF64(input);
case wasm::kExprI32AsmjsUConvertF64:
@@ -584,9 +593,15 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
op = m->RoundUint32ToFloat32();
break;
case wasm::kExprI32SConvertF32:
- return BuildI32SConvertF32(input, position);
+ return BuildI32SConvertF32(input, position, NumericImplementation::kTrap);
+ case wasm::kExprI32SConvertSatF32:
+ return BuildI32SConvertF32(input, position,
+ NumericImplementation::kSaturate);
case wasm::kExprI32UConvertF32:
- return BuildI32UConvertF32(input, position);
+ return BuildI32UConvertF32(input, position, NumericImplementation::kTrap);
+ case wasm::kExprI32UConvertSatF32:
+ return BuildI32UConvertF32(input, position,
+ NumericImplementation::kSaturate);
case wasm::kExprI32AsmjsSConvertF32:
return BuildI32AsmjsSConvertF32(input);
case wasm::kExprI32AsmjsUConvertF32:
@@ -964,7 +979,7 @@ Node* WasmGraphBuilder::Unreachable(wasm::WasmCodePosition position) {
}
Node* WasmGraphBuilder::MaskShiftCount32(Node* node) {
- static const int32_t kMask32 = 0x1f;
+ static const int32_t kMask32 = 0x1F;
if (!jsgraph()->machine()->Word32ShiftIsSafe()) {
// Shifts by constants are so common we pattern-match them here.
Int32Matcher match(node);
@@ -980,7 +995,7 @@ Node* WasmGraphBuilder::MaskShiftCount32(Node* node) {
}
Node* WasmGraphBuilder::MaskShiftCount64(Node* node) {
- static const int64_t kMask64 = 0x3f;
+ static const int64_t kMask64 = 0x3F;
if (!jsgraph()->machine()->Word32ShiftIsSafe()) {
// Shifts by constants are so common we pattern-match them here.
Int64Matcher match(node);
@@ -1009,9 +1024,8 @@ static bool ReverseBytesSupported(MachineOperatorBuilder* m,
return false;
}
-Node* WasmGraphBuilder::BuildChangeEndiannessStore(Node* node,
- MachineType memtype,
- wasm::ValueType wasmtype) {
+Node* WasmGraphBuilder::BuildChangeEndiannessStore(
+ Node* node, MachineRepresentation mem_rep, wasm::ValueType wasmtype) {
Node* result;
Node* value = node;
MachineOperatorBuilder* m = jsgraph()->machine();
@@ -1040,23 +1054,22 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(Node* node,
break;
}
- if (memtype.representation() == MachineRepresentation::kWord8) {
+ if (mem_rep == MachineRepresentation::kWord8) {
// No need to change endianness for byte size, return original node
return node;
}
- if (wasmtype == wasm::kWasmI64 &&
- memtype.representation() < MachineRepresentation::kWord64) {
+ if (wasmtype == wasm::kWasmI64 && mem_rep < MachineRepresentation::kWord64) {
// In case we store lower part of WasmI64 expression, we can truncate
// upper 32bits
value = graph()->NewNode(m->TruncateInt64ToInt32(), value);
valueSizeInBytes = 1 << ElementSizeLog2Of(wasm::kWasmI32);
valueSizeInBits = 8 * valueSizeInBytes;
- if (memtype.representation() == MachineRepresentation::kWord16) {
+ if (mem_rep == MachineRepresentation::kWord16) {
value =
graph()->NewNode(m->Word32Shl(), value, jsgraph()->Int32Constant(16));
}
} else if (wasmtype == wasm::kWasmI32 &&
- memtype.representation() == MachineRepresentation::kWord16) {
+ mem_rep == MachineRepresentation::kWord16) {
value =
graph()->NewNode(m->Word32Shl(), value, jsgraph()->Int32Constant(16));
}
@@ -1325,7 +1338,7 @@ Node* WasmGraphBuilder::BuildF32CopySign(Node* left, Node* right) {
wasm::kExprF32ReinterpretI32,
Binop(wasm::kExprI32Ior,
Binop(wasm::kExprI32And, Unop(wasm::kExprI32ReinterpretF32, left),
- jsgraph()->Int32Constant(0x7fffffff)),
+ jsgraph()->Int32Constant(0x7FFFFFFF)),
Binop(wasm::kExprI32And, Unop(wasm::kExprI32ReinterpretF32, right),
jsgraph()->Int32Constant(0x80000000))));
@@ -1338,7 +1351,7 @@ Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
wasm::kExprF64ReinterpretI64,
Binop(wasm::kExprI64Ior,
Binop(wasm::kExprI64And, Unop(wasm::kExprI64ReinterpretF64, left),
- jsgraph()->Int64Constant(0x7fffffffffffffff)),
+ jsgraph()->Int64Constant(0x7FFFFFFFFFFFFFFF)),
Binop(wasm::kExprI64And, Unop(wasm::kExprI64ReinterpretF64, right),
jsgraph()->Int64Constant(0x8000000000000000))));
@@ -1350,78 +1363,177 @@ Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
Node* high_word_right =
graph()->NewNode(m->Float64ExtractHighWord32(), right);
- Node* new_high_word =
- Binop(wasm::kExprI32Ior, Binop(wasm::kExprI32And, high_word_left,
- jsgraph()->Int32Constant(0x7fffffff)),
- Binop(wasm::kExprI32And, high_word_right,
- jsgraph()->Int32Constant(0x80000000)));
+ Node* new_high_word = Binop(wasm::kExprI32Ior,
+ Binop(wasm::kExprI32And, high_word_left,
+ jsgraph()->Int32Constant(0x7FFFFFFF)),
+ Binop(wasm::kExprI32And, high_word_right,
+ jsgraph()->Int32Constant(0x80000000)));
return graph()->NewNode(m->Float64InsertHighWord32(), left, new_high_word);
#endif
}
-Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input,
- wasm::WasmCodePosition position) {
- MachineOperatorBuilder* m = jsgraph()->machine();
+// Helper classes for float to int conversions.
+struct WasmGraphBuilder::IntConvertOps {
+ MachineRepresentation word_rep() const {
+ return MachineRepresentation::kWord32;
+ }
+ Node* zero() const { return builder_->Int32Constant(0); }
+ virtual Node* min() const = 0;
+ virtual Node* max() const = 0;
+ virtual ~IntConvertOps() = default;
+
+ protected:
+ explicit IntConvertOps(WasmGraphBuilder* builder) : builder_(builder) {}
+ WasmGraphBuilder* builder_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(IntConvertOps);
+};
+
+struct I32SConvertOps final : public WasmGraphBuilder::IntConvertOps {
+ explicit I32SConvertOps(WasmGraphBuilder* builder)
+ : WasmGraphBuilder::IntConvertOps(builder) {}
+ ~I32SConvertOps() = default;
+ Node* min() const {
+ return builder_->Int32Constant(std::numeric_limits<int32_t>::min());
+ }
+ Node* max() const {
+ return builder_->Int32Constant(std::numeric_limits<int32_t>::max());
+ }
+ DISALLOW_IMPLICIT_CONSTRUCTORS(I32SConvertOps);
+};
+
+struct I32UConvertOps final : public WasmGraphBuilder::IntConvertOps {
+ explicit I32UConvertOps(WasmGraphBuilder* builder)
+ : WasmGraphBuilder::IntConvertOps(builder) {}
+ ~I32UConvertOps() = default;
+ Node* min() const {
+ return builder_->Int32Constant(std::numeric_limits<uint32_t>::min());
+ }
+ Node* max() const {
+ return builder_->Int32Constant(std::numeric_limits<uint32_t>::max());
+ }
+ DISALLOW_IMPLICIT_CONSTRUCTORS(I32UConvertOps);
+};
+
+struct WasmGraphBuilder::FloatConvertOps {
+ virtual Node* zero() const = 0;
+ virtual wasm::WasmOpcode trunc_op() const = 0;
+ virtual wasm::WasmOpcode ne_op() const = 0;
+ virtual wasm::WasmOpcode lt_op() const = 0;
+ virtual ~FloatConvertOps() = default;
+
+ protected:
+ explicit FloatConvertOps(WasmGraphBuilder* builder) : builder_(builder) {}
+ WasmGraphBuilder* builder_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FloatConvertOps);
+};
+
+struct F32ConvertOps final : public WasmGraphBuilder::FloatConvertOps {
+ explicit F32ConvertOps(WasmGraphBuilder* builder)
+ : WasmGraphBuilder::FloatConvertOps(builder) {}
+ ~F32ConvertOps() = default;
+ Node* zero() const { return builder_->Float32Constant(0.0); }
+ wasm::WasmOpcode trunc_op() const { return wasm::kExprF32Trunc; }
+ wasm::WasmOpcode ne_op() const { return wasm::kExprF32Ne; }
+ wasm::WasmOpcode lt_op() const { return wasm::kExprF32Lt; }
+ DISALLOW_IMPLICIT_CONSTRUCTORS(F32ConvertOps);
+};
+
+struct F64ConvertOps final : public WasmGraphBuilder::FloatConvertOps {
+ explicit F64ConvertOps(WasmGraphBuilder* builder)
+ : WasmGraphBuilder::FloatConvertOps(builder) {}
+ ~F64ConvertOps() = default;
+ Node* zero() const { return builder_->Float64Constant(0.0); }
+ wasm::WasmOpcode trunc_op() const { return wasm::kExprF64Trunc; }
+ wasm::WasmOpcode ne_op() const { return wasm::kExprF64Ne; }
+ wasm::WasmOpcode lt_op() const { return wasm::kExprF64Lt; }
+ DISALLOW_IMPLICIT_CONSTRUCTORS(F64ConvertOps);
+};
+
+Node* WasmGraphBuilder::BuildConvertCheck(Node* test, Node* result, Node* input,
+ wasm::WasmCodePosition position,
+ NumericImplementation impl,
+ const IntConvertOps* int_ops,
+ const FloatConvertOps* float_ops) {
+ switch (impl) {
+ case NumericImplementation::kTrap:
+ TrapIfTrue(wasm::kTrapFloatUnrepresentable, test, position);
+ return result;
+ case NumericImplementation::kSaturate: {
+ Diamond tl_d(graph(), jsgraph()->common(), test, BranchHint::kFalse);
+ tl_d.Chain(*control_);
+ Diamond nan_d(graph(), jsgraph()->common(),
+ Binop(float_ops->ne_op(), input, input), // Checks if NaN.
+ BranchHint::kFalse);
+ nan_d.Nest(tl_d, true);
+ Diamond sat_d(graph(), jsgraph()->common(),
+ Binop(float_ops->lt_op(), input, float_ops->zero()),
+ BranchHint::kNone);
+ sat_d.Nest(nan_d, false);
+ Node* sat_val =
+ sat_d.Phi(int_ops->word_rep(), int_ops->min(), int_ops->max());
+ Node* nan_val = nan_d.Phi(int_ops->word_rep(), int_ops->zero(), sat_val);
+ return tl_d.Phi(int_ops->word_rep(), nan_val, result);
+ }
+ }
+ UNREACHABLE();
+}
+
+Node* WasmGraphBuilder::BuildI32ConvertOp(
+ Node* input, wasm::WasmCodePosition position, NumericImplementation impl,
+ const Operator* op, wasm::WasmOpcode check_op, const IntConvertOps* int_ops,
+ const FloatConvertOps* float_ops) {
// Truncation of the input value is needed for the overflow check later.
- Node* trunc = Unop(wasm::kExprF32Trunc, input);
- Node* result = graph()->NewNode(m->TruncateFloat32ToInt32(), trunc);
+ Node* trunc = Unop(float_ops->trunc_op(), input);
+ Node* result = graph()->NewNode(op, trunc);
// Convert the result back to f64. If we end up at a different value than the
- // truncated input value, then there has been an overflow and we trap.
- Node* check = Unop(wasm::kExprF32SConvertI32, result);
- Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
- TrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
+ // truncated input value, then there has been an overflow and we
+ // trap/saturate.
+ Node* check = Unop(check_op, result);
+ Node* overflow = Binop(float_ops->ne_op(), trunc, check);
+ return BuildConvertCheck(overflow, result, input, position, impl, int_ops,
+ float_ops);
+}
- return result;
+Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input,
+ wasm::WasmCodePosition position,
+ NumericImplementation impl) {
+ I32SConvertOps int_ops(this);
+ F32ConvertOps float_ops(this);
+ return BuildI32ConvertOp(input, position, impl,
+ jsgraph()->machine()->TruncateFloat32ToInt32(),
+ wasm::kExprF32SConvertI32, &int_ops, &float_ops);
}
Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input,
- wasm::WasmCodePosition position) {
- MachineOperatorBuilder* m = jsgraph()->machine();
- // Truncation of the input value is needed for the overflow check later.
- Node* trunc = Unop(wasm::kExprF64Trunc, input);
- Node* result = graph()->NewNode(m->ChangeFloat64ToInt32(), trunc);
-
- // Convert the result back to f64. If we end up at a different value than the
- // truncated input value, then there has been an overflow and we trap.
- Node* check = Unop(wasm::kExprF64SConvertI32, result);
- Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
- TrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
-
- return result;
+ wasm::WasmCodePosition position,
+ NumericImplementation impl) {
+ I32SConvertOps int_ops(this);
+ F64ConvertOps float_ops(this);
+ return BuildI32ConvertOp(input, position, impl,
+ jsgraph()->machine()->ChangeFloat64ToInt32(),
+ wasm::kExprF64SConvertI32, &int_ops, &float_ops);
}
Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input,
- wasm::WasmCodePosition position) {
- MachineOperatorBuilder* m = jsgraph()->machine();
- // Truncation of the input value is needed for the overflow check later.
- Node* trunc = Unop(wasm::kExprF32Trunc, input);
- Node* result = graph()->NewNode(m->TruncateFloat32ToUint32(), trunc);
-
- // Convert the result back to f32. If we end up at a different value than the
- // truncated input value, then there has been an overflow and we trap.
- Node* check = Unop(wasm::kExprF32UConvertI32, result);
- Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
- TrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
-
- return result;
+ wasm::WasmCodePosition position,
+ NumericImplementation impl) {
+ I32UConvertOps int_ops(this);
+ F32ConvertOps float_ops(this);
+ return BuildI32ConvertOp(input, position, impl,
+ jsgraph()->machine()->TruncateFloat32ToUint32(),
+ wasm::kExprF32UConvertI32, &int_ops, &float_ops);
}
Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input,
- wasm::WasmCodePosition position) {
- MachineOperatorBuilder* m = jsgraph()->machine();
- // Truncation of the input value is needed for the overflow check later.
- Node* trunc = Unop(wasm::kExprF64Trunc, input);
- Node* result = graph()->NewNode(m->TruncateFloat64ToUint32(), trunc);
-
- // Convert the result back to f64. If we end up at a different value than the
- // truncated input value, then there has been an overflow and we trap.
- Node* check = Unop(wasm::kExprF64UConvertI32, result);
- Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
- TrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
-
- return result;
+ wasm::WasmCodePosition position,
+ NumericImplementation impl) {
+ I32UConvertOps int_ops(this);
+ F64ConvertOps float_ops(this);
+ return BuildI32ConvertOp(input, position, impl,
+ jsgraph()->machine()->TruncateFloat64ToUint32(),
+ wasm::kExprF64UConvertI32, &int_ops, &float_ops);
}
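The four builders above now delegate to BuildI32ConvertOp; the extra NumericImplementation argument selects between the trapping conversions and the saturating ones hinted at by the "trap/saturate" comment. A hypothetical call site, using the signatures declared in wasm-compiler.h further down in this patch, might look like:

  // Trapping truncation (the pre-existing i32.trunc_s/f32 semantics).
  Node* trapping = BuildI32SConvertF32(input, position,
                                       NumericImplementation::kTrap);
  // Saturating variant: out-of-range inputs are clamped instead of trapping.
  Node* saturating = BuildI32SConvertF32(input, position,
                                         NumericImplementation::kSaturate);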
Node* WasmGraphBuilder::BuildI32AsmjsSConvertF32(Node* input) {
@@ -1861,8 +1973,7 @@ Node* WasmGraphBuilder::Throw(uint32_t tag,
break;
}
default:
- CHECK(false);
- break;
+ UNREACHABLE();
}
}
DCHECK_EQ(encoded_size, index);
@@ -1961,8 +2072,7 @@ Node** WasmGraphBuilder::GetExceptionValues(
break;
}
default:
- CHECK(false);
- break;
+ UNREACHABLE();
}
values[i] = value;
}
@@ -2330,7 +2440,7 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
// Make room for the wasm_context parameter at index 1, just after code.
memmove(&args[2], &args[1], params * sizeof(Node*));
- args[1] = wasm_context_;
+ args[1] = wasm_context_.get();
// Add effect and control inputs.
args[params + 2] = *effect_;
@@ -2364,7 +2474,7 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets,
DCHECK_NULL(args[0]);
wasm::FunctionSig* sig = env_->module->functions[index].sig;
if (FLAG_wasm_jit_to_native) {
- // Simply encode the index of the target.
+ // Just encode the function index. This will be patched at instantiation.
Address code = reinterpret_cast<Address>(index);
args[0] = jsgraph()->RelocatableIntPtrConstant(
reinterpret_cast<intptr_t>(code), RelocInfo::WASM_CALL);
@@ -2396,45 +2506,39 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
Node* key = args[0];
// Bounds check against the table size.
- Node* size = function_table_sizes_[table_index];
+ Node* size = function_tables_[table_index].size;
Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
TrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
- Node* table_address = function_tables_[table_index];
+ Node* table_address = function_tables_[table_index].table_addr;
Node* table = graph()->NewNode(
jsgraph()->machine()->Load(MachineType::AnyTagged()), table_address,
jsgraph()->IntPtrConstant(0), *effect_, *control_);
- Node* signatures_address = signature_tables_[table_index];
- Node* signatures = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::AnyTagged()), signatures_address,
- jsgraph()->IntPtrConstant(0), *effect_, *control_);
// Load signature from the table and check.
// The table is a FixedArray; signatures are encoded as SMIs.
- // [sig1, sig2, sig3, ...., code1, code2, code3 ...]
+ // [sig1, code1, sig2, code2, sig3, code3, ...]
+ static_assert(compiler::kFunctionTableEntrySize == 2, "consistency");
+ static_assert(compiler::kFunctionTableSignatureOffset == 0, "consistency");
+ static_assert(compiler::kFunctionTableCodeOffset == 1, "consistency");
ElementAccess access = AccessBuilder::ForFixedArrayElement();
const int fixed_offset = access.header_size - access.tag();
- {
- Node* load_sig = graph()->NewNode(
- machine->Load(MachineType::AnyTagged()), signatures,
- graph()->NewNode(machine->Int32Add(),
- graph()->NewNode(machine->Word32Shl(), key,
- Int32Constant(kPointerSizeLog2)),
- Int32Constant(fixed_offset)),
- *effect_, *control_);
- int32_t canonical_sig_num = env_->module->signature_ids[sig_index];
- CHECK_GE(sig_index, 0);
- Node* sig_match =
- graph()->NewNode(machine->WordEqual(), load_sig,
- jsgraph()->SmiConstant(canonical_sig_num));
- TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
- }
+ Node* key_offset = graph()->NewNode(machine->Word32Shl(), key,
+ Int32Constant(kPointerSizeLog2 + 1));
+ Node* load_sig =
+ graph()->NewNode(machine->Load(MachineType::AnyTagged()), table,
+ graph()->NewNode(machine->Int32Add(), key_offset,
+ Int32Constant(fixed_offset)),
+ *effect_, *control_);
+ int32_t canonical_sig_num = env_->module->signature_ids[sig_index];
+ CHECK_GE(sig_index, 0);
+ Node* sig_match = graph()->NewNode(machine->WordEqual(), load_sig,
+ jsgraph()->SmiConstant(canonical_sig_num));
+ TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
// Load code object from the table. It is held by a Foreign.
Node* entry = graph()->NewNode(
machine->Load(MachineType::AnyTagged()), table,
- graph()->NewNode(machine->Int32Add(),
- graph()->NewNode(machine->Word32Shl(), key,
- Int32Constant(kPointerSizeLog2)),
- Uint32Constant(fixed_offset)),
+ graph()->NewNode(machine->Int32Add(), key_offset,
+ Uint32Constant(fixed_offset + kPointerSize)),
*effect_, *control_);
if (FLAG_wasm_jit_to_native) {
Node* address = graph()->NewNode(
@@ -2715,12 +2819,8 @@ Node* WasmGraphBuilder::BuildChangeSmiToInt32(Node* value) {
}
Node* WasmGraphBuilder::BuildChangeUint32ToSmi(Node* value) {
- if (jsgraph()->machine()->Is64()) {
- value =
- graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), value);
- }
- return graph()->NewNode(jsgraph()->machine()->WordShl(), value,
- BuildSmiShiftBitsConstant());
+ return graph()->NewNode(jsgraph()->machine()->WordShl(),
+ Uint32ToUintptr(value), BuildSmiShiftBitsConstant());
}
Node* WasmGraphBuilder::BuildChangeSmiToFloat64(Node* value) {
@@ -2826,7 +2926,7 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(WasmCodeWrapper wasm_code,
// the wasm function could not be re-imported into another wasm module.
int pos = 0;
args[pos++] = wasm_code_node;
- args[pos++] = wasm_context_;
+ args[pos++] = wasm_context_.get();
args[pos++] = *effect_;
args[pos++] = *control_;
@@ -2841,7 +2941,7 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(WasmCodeWrapper wasm_code,
int pos = 0;
args[pos++] = wasm_code_node;
- args[pos++] = wasm_context_;
+ args[pos++] = wasm_context_.get();
// Convert JS parameters to wasm numbers.
for (int i = 0; i < wasm_count; ++i) {
@@ -3177,7 +3277,7 @@ void WasmGraphBuilder::BuildCWasmEntry(Address wasm_context_address) {
int pos = 0;
args[pos++] = code_obj;
- args[pos++] = wasm_context_;
+ args[pos++] = wasm_context_.get();
int offset = 0;
for (wasm::ValueType type : sig_->parameters()) {
@@ -3232,7 +3332,7 @@ void WasmGraphBuilder::InitContextCache(WasmContextCacheNodes* context_cache) {
// Load the memory start.
Node* mem_start = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_,
+ jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_.get(),
jsgraph()->Int32Constant(
static_cast<int32_t>(offsetof(WasmContext, mem_start))),
*effect_, *control_);
@@ -3241,7 +3341,7 @@ void WasmGraphBuilder::InitContextCache(WasmContextCacheNodes* context_cache) {
// Load the memory size.
Node* mem_size = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_,
+ jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_.get(),
jsgraph()->Int32Constant(
static_cast<int32_t>(offsetof(WasmContext, mem_size))),
*effect_, *control_);
@@ -3251,7 +3351,7 @@ void WasmGraphBuilder::InitContextCache(WasmContextCacheNodes* context_cache) {
if (untrusted_code_mitigations_) {
// Load the memory mask.
Node* mem_mask = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_,
+ jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_.get(),
jsgraph()->Int32Constant(
static_cast<int32_t>(offsetof(WasmContext, mem_mask))),
*effect_, *control_);
@@ -3353,12 +3453,12 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
// possible to express in the graph, and would essentially constitute a
// "mem2reg" optimization in TurboFan.
globals_start_ = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_,
+ jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_.get(),
jsgraph()->Int32Constant(
static_cast<int32_t>(offsetof(WasmContext, globals_start))),
graph()->start(), graph()->start());
}
- *base_node = globals_start_;
+ *base_node = globals_start_.get();
*offset_node = jsgraph()->Int32Constant(offset);
if (mem_type == MachineType::Simd128() && offset != 0) {
@@ -3392,7 +3492,7 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
}
return graph()->NewNode(
jsgraph()->machine()->Word32Shr(), mem_size,
- jsgraph()->Int32Constant(WhichPowerOf2(wasm::WasmModule::kPageSize)));
+ jsgraph()->Int32Constant(WhichPowerOf2(wasm::kWasmPageSize)));
}
void WasmGraphBuilder::EnsureFunctionTableNodes() {
@@ -3401,25 +3501,21 @@ void WasmGraphBuilder::EnsureFunctionTableNodes() {
for (size_t i = 0; i < tables_size; ++i) {
wasm::GlobalHandleAddress function_handle_address =
env_->function_tables[i];
- wasm::GlobalHandleAddress signature_handle_address =
- env_->signature_tables[i];
- function_tables_.push_back(jsgraph()->RelocatableIntPtrConstant(
+ Node* table_addr = jsgraph()->RelocatableIntPtrConstant(
reinterpret_cast<intptr_t>(function_handle_address),
- RelocInfo::WASM_GLOBAL_HANDLE));
- signature_tables_.push_back(jsgraph()->RelocatableIntPtrConstant(
- reinterpret_cast<intptr_t>(signature_handle_address),
- RelocInfo::WASM_GLOBAL_HANDLE));
+ RelocInfo::WASM_GLOBAL_HANDLE);
uint32_t table_size = env_->module->function_tables[i].initial_size;
- function_table_sizes_.push_back(jsgraph()->RelocatableInt32Constant(
+ Node* size = jsgraph()->RelocatableInt32Constant(
static_cast<uint32_t>(table_size),
- RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE));
+ RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
+ function_tables_.push_back({table_addr, size});
}
}
Node* WasmGraphBuilder::BuildModifyThreadInWasmFlag(bool new_value) {
// TODO(eholk): generate code to modify the thread-local storage directly,
// rather than calling the runtime.
- if (!trap_handler::UseTrapHandler()) {
+ if (!use_trap_handler()) {
return *control_;
}
@@ -3507,46 +3603,55 @@ Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
return node;
}
-Node* WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
+Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
uint32_t offset,
wasm::WasmCodePosition position,
EnforceBoundsCheck enforce_check) {
- if (FLAG_wasm_no_bounds_checks) return index;
+ if (FLAG_wasm_no_bounds_checks) return Uint32ToUintptr(index);
DCHECK_NOT_NULL(context_cache_);
Node* mem_size = context_cache_->mem_size;
DCHECK_NOT_NULL(mem_size);
auto m = jsgraph()->machine();
- if (trap_handler::UseTrapHandler() && enforce_check == kCanOmitBoundsCheck) {
+ if (use_trap_handler() && enforce_check == kCanOmitBoundsCheck) {
// Simply zero out the 32-bits on 64-bit targets and let the trap handler
// do its job.
- return m->Is64() ? graph()->NewNode(m->ChangeUint32ToUint64(), index)
- : index;
+ return Uint32ToUintptr(index);
}
- uint32_t min_size = env_->module->initial_pages * wasm::WasmModule::kPageSize;
+ uint32_t min_size = env_->module->initial_pages * wasm::kWasmPageSize;
uint32_t max_size =
(env_->module->has_maximum_pages ? env_->module->maximum_pages
: wasm::kV8MaxWasmMemoryPages) *
- wasm::WasmModule::kPageSize;
-
- byte access_size = wasm::WasmOpcodes::MemSize(memtype);
+ wasm::kWasmPageSize;
if (access_size > max_size || offset > max_size - access_size) {
// The access will be out of bounds, even for the largest memory.
- TrapIfEq32(wasm::kTrapMemOutOfBounds, jsgraph()->Int32Constant(0), 0,
- position);
+ TrapIfEq32(wasm::kTrapMemOutOfBounds, Int32Constant(0), 0, position);
return jsgraph()->IntPtrConstant(0);
}
- uint32_t end_offset = offset + access_size;
-
- if (end_offset > min_size) {
+ DCHECK_LE(1, access_size);
+ // This computation cannot overflow, since
+ // {offset <= max_size - access_size <= kMaxUint32 - access_size}.
+ // It also cannot underflow, since {access_size >= 1}.
+ uint32_t end_offset = offset + access_size - 1;
+ Node* end_offset_node = Int32Constant(end_offset);
+
+ // The accessed memory is [index + offset, index + end_offset].
+ // Check that the last read byte (at {index + end_offset}) is in bounds.
+ // 1) Check that {end_offset < mem_size}. This also ensures that we can safely
+  //    compute {effective_size} as {mem_size - end_offset}.
+ // {effective_size} is >= 1 if condition 1) holds.
+ // 2) Check that {index + end_offset < mem_size} by
+ // - computing {effective_size} as {mem_size - end_offset} and
+ // - checking that {index < effective_size}.
+
+ if (end_offset >= min_size) {
// The end offset is larger than the smallest memory.
// Dynamically check the end offset against the actual memory size, which
// is not known at compile time.
- Node* cond =
- graph()->NewNode(jsgraph()->machine()->Uint32LessThanOrEqual(),
- jsgraph()->Int32Constant(end_offset), mem_size);
+ Node* cond = graph()->NewNode(jsgraph()->machine()->Uint32LessThan(),
+ end_offset_node, mem_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
} else {
// The end offset is within the bounds of the smallest memory, so only
@@ -3554,22 +3659,17 @@ Node* WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
Uint32Matcher match(index);
if (match.HasValue()) {
uint32_t index_val = match.Value();
- if (index_val <= min_size - end_offset) {
+ if (index_val < min_size - end_offset) {
// The input index is a constant and everything is statically within
// bounds of the smallest possible memory.
- return m->Is64() ? graph()->NewNode(m->ChangeUint32ToUint64(), index)
- : index;
+ return Uint32ToUintptr(index);
}
}
}
- // Compute the effective size of the memory, which is the size of the memory
- // minus the statically known offset, minus the byte size of the access minus
- // one.
- // This produces a positive number since {end_offset <= min_size <= mem_size}.
- Node* effective_size =
- graph()->NewNode(jsgraph()->machine()->Int32Sub(), mem_size,
- jsgraph()->Int32Constant(end_offset - 1));
+ // This produces a positive number, since {end_offset < min_size <= mem_size}.
+ Node* effective_size = graph()->NewNode(jsgraph()->machine()->Int32Sub(),
+ mem_size, end_offset_node);
// Introduce the actual bounds check.
Node* cond = graph()->NewNode(m->Uint32LessThan(), index, effective_size);
@@ -3581,7 +3681,7 @@ Node* WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
DCHECK_NOT_NULL(mem_mask);
index = graph()->NewNode(m->Word32And(), index, mem_mask);
}
- return m->Is64() ? graph()->NewNode(m->ChangeUint32ToUint64(), index) : index;
+ return Uint32ToUintptr(index);
}
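Worked example of the new bounds check: for a 4-byte access at static offset 4, end_offset = 4 + 4 - 1 = 7, so the emitted check is index < mem_size - 7, which guarantees that the last accessed byte (at index + 7) lies below mem_size. The old formulation computed end_offset = offset + access_size = 8, checked 8 <= mem_size, and then subtracted end_offset - 1 = 7 again; the new form folds the "- 1" into the compile-time constant and turns the first check into a strict Uint32LessThan, but the resulting bound is identical.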
const Operator* WasmGraphBuilder::GetSafeLoadOperator(int offset,
@@ -3609,21 +3709,28 @@ Node* WasmGraphBuilder::TraceMemoryOperation(bool is_store,
MachineRepresentation rep,
Node* index, uint32_t offset,
wasm::WasmCodePosition position) {
+ int kAlign = 4; // Ensure that the LSB is 0, such that this looks like a Smi.
+ Node* info = graph()->NewNode(
+ jsgraph()->machine()->StackSlot(sizeof(wasm::MemoryTracingInfo), kAlign));
+
Node* address = graph()->NewNode(jsgraph()->machine()->Int32Add(),
Int32Constant(offset), index);
- Node* addr_low = BuildChangeInt32ToSmi(graph()->NewNode(
- jsgraph()->machine()->Word32And(), address, Int32Constant(0xffff)));
- Node* addr_high = BuildChangeInt32ToSmi(graph()->NewNode(
- jsgraph()->machine()->Word32Shr(), address, Int32Constant(16)));
- int32_t rep_i = static_cast<int32_t>(rep);
- Node* params[] = {
- jsgraph()->SmiConstant(is_store), // is_store
- jsgraph()->SmiConstant(rep_i), // mem rep
- addr_low, // address lower half word
- addr_high // address higher half word
+ auto store = [&](int offset, MachineRepresentation rep, Node* data) {
+ *effect_ = graph()->NewNode(
+ jsgraph()->machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)),
+ info, jsgraph()->Int32Constant(offset), data, *effect_, *control_);
};
- Node* call =
- BuildCallToRuntime(Runtime::kWasmTraceMemory, params, arraysize(params));
+ // Store address, is_store, and mem_rep.
+ store(offsetof(wasm::MemoryTracingInfo, address),
+ MachineRepresentation::kWord32, address);
+ store(offsetof(wasm::MemoryTracingInfo, is_store),
+ MachineRepresentation::kWord8,
+ jsgraph()->Int32Constant(is_store ? 1 : 0));
+ store(offsetof(wasm::MemoryTracingInfo, mem_rep),
+ MachineRepresentation::kWord8,
+ jsgraph()->Int32Constant(static_cast<int>(rep)));
+
+ Node* call = BuildCallToRuntime(Runtime::kWasmTraceMemory, &info, 1);
SetSourcePosition(call, position);
return call;
}
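The runtime call now receives a single stack-slot argument instead of four Smi parameters. The three stores above imply a layout roughly like the following sketch; the actual wasm::MemoryTracingInfo definition lives elsewhere in the wasm sources and may carry additional fields:

  struct MemoryTracingInfo {
    uint32_t address;  // stored as kWord32: effective address = offset + index
    uint8_t is_store;  // stored as kWord8: 1 for stores, 0 for loads
    uint8_t mem_rep;   // stored as kWord8: MachineRepresentation of the access
  };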
@@ -3636,11 +3743,12 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
// Wasm semantics throw on OOB. Introduce explicit bounds check and
// conditioning when not using the trap handler.
- index = BoundsCheckMem(memtype, index, offset, position, kCanOmitBoundsCheck);
+ index = BoundsCheckMem(wasm::WasmOpcodes::MemSize(memtype), index, offset,
+ position, kCanOmitBoundsCheck);
if (memtype.representation() == MachineRepresentation::kWord8 ||
jsgraph()->machine()->UnalignedLoadSupported(memtype.representation())) {
- if (trap_handler::UseTrapHandler()) {
+ if (use_trap_handler()) {
load = graph()->NewNode(jsgraph()->machine()->ProtectedLoad(memtype),
MemBuffer(offset), index, *effect_, *control_);
SetSourcePosition(load, position);
@@ -3650,7 +3758,7 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
}
} else {
// TODO(eholk): Support unaligned loads with trap handlers.
- DCHECK(!trap_handler::UseTrapHandler());
+ DCHECK(!use_trap_handler());
load = graph()->NewNode(jsgraph()->machine()->UnalignedLoad(memtype),
MemBuffer(offset), index, *effect_, *control_);
}
@@ -3682,35 +3790,36 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
return load;
}
-Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
+Node* WasmGraphBuilder::StoreMem(MachineRepresentation mem_rep, Node* index,
uint32_t offset, uint32_t alignment, Node* val,
wasm::WasmCodePosition position,
wasm::ValueType type) {
Node* store;
- index = BoundsCheckMem(memtype, index, offset, position, kCanOmitBoundsCheck);
+ index = BoundsCheckMem(wasm::WasmOpcodes::MemSize(mem_rep), index, offset,
+ position, kCanOmitBoundsCheck);
#if defined(V8_TARGET_BIG_ENDIAN)
- val = BuildChangeEndiannessStore(val, memtype, type);
+ val = BuildChangeEndiannessStore(val, mem_rep, type);
#endif
- if (memtype.representation() == MachineRepresentation::kWord8 ||
- jsgraph()->machine()->UnalignedStoreSupported(memtype.representation())) {
- if (trap_handler::UseTrapHandler()) {
- store = graph()->NewNode(
- jsgraph()->machine()->ProtectedStore(memtype.representation()),
- MemBuffer(offset), index, val, *effect_, *control_);
+ if (mem_rep == MachineRepresentation::kWord8 ||
+ jsgraph()->machine()->UnalignedStoreSupported(mem_rep)) {
+ if (use_trap_handler()) {
+ store =
+ graph()->NewNode(jsgraph()->machine()->ProtectedStore(mem_rep),
+ MemBuffer(offset), index, val, *effect_, *control_);
SetSourcePosition(store, position);
} else {
- StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
+ StoreRepresentation rep(mem_rep, kNoWriteBarrier);
store =
graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
index, val, *effect_, *control_);
}
} else {
// TODO(eholk): Support unaligned stores with trap handlers.
- DCHECK(!trap_handler::UseTrapHandler());
- UnalignedStoreRepresentation rep(memtype.representation());
+ DCHECK(!use_trap_handler());
+ UnalignedStoreRepresentation rep(mem_rep);
store =
graph()->NewNode(jsgraph()->machine()->UnalignedStore(rep),
MemBuffer(offset), index, val, *effect_, *control_);
@@ -3719,8 +3828,7 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
*effect_ = store;
if (FLAG_wasm_trace_memory) {
- TraceMemoryOperation(true, memtype.representation(), index, offset,
- position);
+ TraceMemoryOperation(true, mem_rep, index, offset, position);
}
return store;
@@ -3772,10 +3880,7 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
graph()->NewNode(jsgraph()->machine()->Word32And(), index, mem_mask);
}
- if (jsgraph()->machine()->Is64()) {
- index =
- graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
- }
+ index = Uint32ToUintptr(index);
Node* load = graph()->NewNode(jsgraph()->machine()->Load(type), mem_start,
index, *effect_, bounds_check.if_true);
Node* value_phi =
@@ -3788,6 +3893,11 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
return value_phi;
}
+Node* WasmGraphBuilder::Uint32ToUintptr(Node* node) {
+ if (jsgraph()->machine()->Is32()) return node;
+ return graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), node);
+}
+
Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
Node* val) {
DCHECK_NOT_NULL(context_cache_);
@@ -3814,10 +3924,7 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
graph()->NewNode(jsgraph()->machine()->Word32And(), index, mem_mask);
}
- if (jsgraph()->machine()->Is64()) {
- index =
- graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
- }
+ index = Uint32ToUintptr(index);
const Operator* store_op = jsgraph()->machine()->Store(StoreRepresentation(
type.representation(), WriteBarrierKind::kNoWriteBarrier));
Node* store = graph()->NewNode(store_op, mem_start, index, val, *effect_,
@@ -4302,22 +4409,24 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
// TODO(gdeepti): Add alignment validation, traps on misalignment
Node* node;
switch (opcode) {
-#define BUILD_ATOMIC_BINOP(Name, Operation, Type) \
- case wasm::kExpr##Name: { \
- Node* index = BoundsCheckMem(MachineType::Type(), inputs[0], offset, \
- position, kNeedsBoundsCheck); \
- node = graph()->NewNode( \
- jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
- MemBuffer(offset), index, inputs[1], *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_BINOP(Name, Operation, Type) \
+ case wasm::kExpr##Name: { \
+ Node* index = \
+ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+ inputs[0], offset, position, kNeedsBoundsCheck); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
+ MemBuffer(offset), index, inputs[1], *effect_, *control_); \
+ break; \
}
ATOMIC_BINOP_LIST(BUILD_ATOMIC_BINOP)
#undef BUILD_ATOMIC_BINOP
#define BUILD_ATOMIC_TERNARY_OP(Name, Operation, Type) \
case wasm::kExpr##Name: { \
- Node* index = BoundsCheckMem(MachineType::Type(), inputs[0], offset, \
- position, kNeedsBoundsCheck); \
+ Node* index = \
+ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+ inputs[0], offset, position, kNeedsBoundsCheck); \
node = graph()->NewNode( \
jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
MemBuffer(offset), index, inputs[1], inputs[2], *effect_, *control_); \
@@ -4326,26 +4435,28 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
ATOMIC_TERNARY_LIST(BUILD_ATOMIC_TERNARY_OP)
#undef BUILD_ATOMIC_TERNARY_OP
-#define BUILD_ATOMIC_LOAD_OP(Name, Type) \
- case wasm::kExpr##Name: { \
- Node* index = BoundsCheckMem(MachineType::Type(), inputs[0], offset, \
- position, kNeedsBoundsCheck); \
- node = graph()->NewNode( \
- jsgraph()->machine()->AtomicLoad(MachineType::Type()), \
- MemBuffer(offset), index, *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_LOAD_OP(Name, Type) \
+ case wasm::kExpr##Name: { \
+ Node* index = \
+ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+ inputs[0], offset, position, kNeedsBoundsCheck); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->AtomicLoad(MachineType::Type()), \
+ MemBuffer(offset), index, *effect_, *control_); \
+ break; \
}
ATOMIC_LOAD_LIST(BUILD_ATOMIC_LOAD_OP)
#undef BUILD_ATOMIC_LOAD_OP
-#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep) \
- case wasm::kExpr##Name: { \
- Node* index = BoundsCheckMem(MachineType::Type(), inputs[0], offset, \
- position, kNeedsBoundsCheck); \
- node = graph()->NewNode( \
- jsgraph()->machine()->AtomicStore(MachineRepresentation::Rep), \
- MemBuffer(offset), index, inputs[1], *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep) \
+ case wasm::kExpr##Name: { \
+ Node* index = \
+ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+ inputs[0], offset, position, kNeedsBoundsCheck); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->AtomicStore(MachineRepresentation::Rep), \
+ MemBuffer(offset), index, inputs[1], *effect_, *control_); \
+ break; \
}
ATOMIC_STORE_LIST(BUILD_ATOMIC_STORE_OP)
#undef BUILD_ATOMIC_STORE_OP
@@ -4391,7 +4502,8 @@ void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
WasmCodeWrapper wasm_code, uint32_t index,
- Address wasm_context_address) {
+ Address wasm_context_address,
+ bool use_trap_handler) {
const wasm::WasmFunction* func = &module->functions[index];
//----------------------------------------------------------------------------
@@ -4410,15 +4522,11 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
Node* effect = nullptr;
// TODO(titzer): compile JS to WASM wrappers without a {ModuleEnv}.
- ModuleEnv env = {
- module,
- std::vector<Address>(), // function_tables
- std::vector<Address>(), // signature_tables
- // TODO(mtrofin): remove these 2 lines when we don't need
- // FLAG_wasm_jit_to_native
- std::vector<Handle<Code>>(), // function_code
- BUILTIN_CODE(isolate, Illegal) // default_function_code
- };
+ ModuleEnv env(module,
+ // TODO(mtrofin): remove the Illegal builtin when we don't need
+ // FLAG_wasm_jit_to_native
+ BUILTIN_CODE(isolate, Illegal), // default_function_code
+ use_trap_handler);
WasmGraphBuilder builder(&env, &zone, &jsgraph,
CEntryStub(isolate, 1).GetCode(), func->sig);
@@ -4470,9 +4578,7 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
namespace {
void ValidateImportWrapperReferencesImmovables(Handle<Code> wrapper) {
-#if !DEBUG
- return;
-#endif
+#ifdef DEBUG
// We expect the only embedded objects to be those originating from
// a snapshot, which are immovable.
DisallowHeapAllocation no_gc;
@@ -4493,7 +4599,7 @@ void ValidateImportWrapperReferencesImmovables(Handle<Code> wrapper) {
default:
UNREACHABLE();
}
- CHECK_NOT_NULL(target);
+ DCHECK_NOT_NULL(target);
bool is_immovable =
target->IsSmi() || Heap::IsImmovable(HeapObject::cast(target));
bool is_allowed_stub = false;
@@ -4503,15 +4609,16 @@ void ValidateImportWrapperReferencesImmovables(Handle<Code> wrapper) {
code->kind() == Code::STUB &&
CodeStub::MajorKeyFromKey(code->stub_key()) == CodeStub::DoubleToI;
}
- CHECK(is_immovable || is_allowed_stub);
+ DCHECK(is_immovable || is_allowed_stub);
}
+#endif
}
} // namespace
Handle<Code> CompileWasmToJSWrapper(
Isolate* isolate, Handle<JSReceiver> target, wasm::FunctionSig* sig,
- uint32_t index, wasm::ModuleOrigin origin,
+ uint32_t index, wasm::ModuleOrigin origin, bool use_trap_handler,
Handle<FixedArray> global_js_imports_table) {
//----------------------------------------------------------------------------
// Create the Graph
@@ -4532,7 +4639,8 @@ Handle<Code> CompileWasmToJSWrapper(
origin == wasm::kAsmJsOrigin ? new (&zone) SourcePositionTable(&graph)
: nullptr;
- WasmGraphBuilder builder(nullptr, &zone, &jsgraph,
+ ModuleEnv env(nullptr, Handle<Code>::null(), use_trap_handler);
+ WasmGraphBuilder builder(&env, &zone, &jsgraph,
CEntryStub(isolate, 1).GetCode(), sig,
source_position_table);
builder.set_control_ptr(&control);
@@ -4618,7 +4726,10 @@ Handle<Code> CompileWasmToWasmWrapper(Isolate* isolate, WasmCodeWrapper target,
Node* control = nullptr;
Node* effect = nullptr;
- WasmGraphBuilder builder(nullptr, &zone, &jsgraph, Handle<Code>(), sig);
+ ModuleEnv env(
+ nullptr, Handle<Code>::null(),
+ !target.IsCodeObject() && target.GetWasmCode()->HasTrapHandlerIndex());
+ WasmGraphBuilder builder(&env, &zone, &jsgraph, Handle<Code>(), sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildWasmToWasmWrapper(target, new_wasm_context_address);
@@ -4804,13 +4915,6 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig,
SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
double* decode_ms) {
-#if DEBUG
- if (env_) {
- size_t tables_size = env_->module->function_tables.size();
- DCHECK_EQ(tables_size, env_->function_tables.size());
- DCHECK_EQ(tables_size, env_->signature_tables.size());
- }
-#endif
base::ElapsedTimer decode_timer;
if (FLAG_trace_wasm_decode_time) {
@@ -4825,7 +4929,6 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
runtime_exception_support_);
tf_.graph_construction_result_ =
wasm::BuildTFGraph(isolate_->allocator(), &builder, func_body_);
-
if (tf_.graph_construction_result_.failed()) {
if (FLAG_trace_wasm_compiler) {
OFStream os(stdout);
@@ -4844,7 +4947,8 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
if (func_index_ >= FLAG_trace_wasm_ast_start &&
func_index_ < FLAG_trace_wasm_ast_end) {
- PrintRawWasmCode(isolate_->allocator(), func_body_, env_->module);
+ PrintRawWasmCode(isolate_->allocator(), func_body_, env_->module,
+ wasm::kPrintLocals);
}
if (FLAG_trace_wasm_decode_time) {
*decode_ms = decode_timer.Elapsed().InMillisecondsF();
@@ -4857,9 +4961,7 @@ Vector<const char> GetDebugName(Zone* zone, wasm::WasmName name, int index) {
if (!name.is_empty()) {
return name;
}
-#ifndef DEBUG
- return {};
-#endif
+#ifdef DEBUG
constexpr int kBufferLength = 15;
EmbeddedVector<char, kBufferLength> name_vector;
@@ -4869,6 +4971,9 @@ Vector<const char> GetDebugName(Zone* zone, wasm::WasmName name, int index) {
char* index_name = zone->NewArray<char>(name_len);
memcpy(index_name, name_vector.start(), name_len);
return Vector<const char>(index_name, name_len);
+#else
+ return {};
+#endif
}
} // namespace
@@ -5090,7 +5195,7 @@ WasmCodeWrapper WasmCompilationUnit::FinishTurbofanCompilation(
desc, tf_.job_->compilation_info()->wasm_code_desc()->frame_slot_count,
func_index_,
tf_.job_->compilation_info()->wasm_code_desc()->safepoint_table_offset,
- protected_instructions_);
+ std::move(protected_instructions_));
if (!code) {
return WasmCodeWrapper(code);
}
@@ -5107,13 +5212,24 @@ WasmCodeWrapper WasmCompilationUnit::FinishTurbofanCompilation(
MaybeHandle<HandlerTable> handler_table =
tf_.job_->compilation_info()->wasm_code_desc()->handler_table;
- int function_index_as_int = static_cast<int>(func_index_);
native_module_->compiled_module()->source_positions()->set(
- function_index_as_int, *source_positions);
+ func_index_, *source_positions);
if (!handler_table.is_null()) {
native_module_->compiled_module()->handler_table()->set(
- function_index_as_int, *handler_table.ToHandleChecked());
+ func_index_, *handler_table.ToHandleChecked());
}
+
+#ifdef ENABLE_DISASSEMBLER
+  // Note: only do this after setting the source positions, as they are
+  // accessed when the code is printed below.
+ if (FLAG_print_code || FLAG_print_wasm_code) {
+ // TODO(wasm): Use proper log files, here and elsewhere.
+ PrintF("--- Native Wasm code ---\n");
+ code->Print(isolate_);
+ PrintF("--- End code ---\n");
+ }
+#endif
+
// TODO(mtrofin): this should probably move up in the common caller,
// once liftoff has source positions. Until then, we'd need to handle
// undefined values, which is complicating the code.
@@ -5147,21 +5263,21 @@ WasmCodeWrapper WasmCompilationUnit::FinishLiftoffCompilation(
wasm::ErrorThrower* thrower) {
CodeDesc desc;
liftoff_.asm_.GetCode(isolate_, &desc);
+
+ Handle<ByteArray> source_positions =
+ liftoff_.source_position_table_builder_.ToSourcePositionTable(isolate_);
+
WasmCodeWrapper ret;
if (!FLAG_wasm_jit_to_native) {
Handle<Code> code;
- code = isolate_->factory()->NewCode(desc, Code::WASM_FUNCTION, code);
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code || FLAG_print_wasm_code) {
- // TODO(wasm): Use proper log files, here and elsewhere.
- OFStream os(stdout);
- os << "--- Wasm liftoff code ---\n";
- EmbeddedVector<char, 32> func_name;
- func_name.Truncate(SNPrintF(func_name, "wasm#%d-liftoff", func_index_));
- code->Disassemble(func_name.start(), os);
- os << "--- End code ---\n";
- }
-#endif
+ code = isolate_->factory()->NewCode(
+ desc, Code::WASM_FUNCTION, code, Builtins::kNoBuiltinId,
+ MaybeHandle<HandlerTable>(), source_positions,
+ MaybeHandle<DeoptimizationData>(), kMovable,
+ 0, // stub_key
+ false, // is_turbofanned
+ liftoff_.asm_.GetTotalFrameSlotCount(), // stack_slots
+ liftoff_.safepoint_table_offset_);
if (isolate_->logger()->is_logging_code_events() ||
isolate_->is_profiling()) {
RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate_, code,
@@ -5169,15 +5285,34 @@ WasmCodeWrapper WasmCompilationUnit::FinishLiftoffCompilation(
}
PackProtectedInstructions(code);
- return WasmCodeWrapper(code);
+ ret = WasmCodeWrapper(code);
} else {
- // TODO(mtrofin): figure a way to raise events; also, disassembly.
- // Consider lifting them both to FinishCompilation.
- return WasmCodeWrapper(native_module_->AddCode(
- desc, liftoff_.asm_.GetTotalFrameSlotCount(), func_index_,
- liftoff_.asm_.GetSafepointTableOffset(), protected_instructions_,
- true));
+ // TODO(mtrofin): figure a way to raise events.
+ // Consider lifting it to FinishCompilation.
+ native_module_->compiled_module()->source_positions()->set(
+ func_index_, *source_positions);
+ ret = WasmCodeWrapper(
+ native_module_->AddCode(desc, liftoff_.asm_.GetTotalFrameSlotCount(),
+ func_index_, liftoff_.safepoint_table_offset_,
+ std::move(protected_instructions_), true));
}
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_code || FLAG_print_wasm_code) {
+ // TODO(wasm): Use proper log files, here and elsewhere.
+ OFStream os(stdout);
+ os << "--- Wasm liftoff code ---\n";
+ EmbeddedVector<char, 64> func_name;
+ if (func_name_.start() != nullptr) {
+ SNPrintF(func_name, "#%d:%.*s", func_index(), func_name_.length(),
+ func_name_.start());
+ } else {
+ SNPrintF(func_name, "wasm#%d", func_index());
+ }
+ ret.Disassemble(func_name.start(), isolate_, os);
+ os << "--- End code ---\n";
+ }
+#endif
+ return ret;
}
// static
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 146f3044ca..22a2e1071e 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -43,25 +43,34 @@ class WasmCode;
namespace compiler {
+// Indirect function tables contain a <smi(sig), code> pair for each entry.
+enum FunctionTableEntries : int {
+ kFunctionTableSignatureOffset = 0,
+ kFunctionTableCodeOffset = 1,
+ kFunctionTableEntrySize = 2
+};
+constexpr inline int FunctionTableSigOffset(int i) {
+ return kFunctionTableEntrySize * i + kFunctionTableSignatureOffset;
+}
+constexpr inline int FunctionTableCodeOffset(int i) {
+ return kFunctionTableEntrySize * i + kFunctionTableCodeOffset;
+}
+
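With the interleaved <signature, code> layout, entry i of an indirect function table occupies two consecutive FixedArray slots, e.g.:

  int sig_slot = FunctionTableSigOffset(2);    // 2 * 2 + 0 == 4: the Smi-encoded signature id
  int code_slot = FunctionTableCodeOffset(2);  // 2 * 2 + 1 == 5: the code object, held via a Foreign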
// The {ModuleEnv} encapsulates the module data that is used by the
// {WasmGraphBuilder} during graph building. It represents the parameters to
// which the compiled code should be specialized, including which code to call
// for direct calls {function_code}, which tables to use for indirect calls
// {function_tables}, memory start address and size {mem_start, mem_size},
-// as well as signature maps {signature_maps} and the module itself {module}.
+// as well as the module itself {module}.
// ModuleEnvs are shareable across multiple compilations.
struct ModuleEnv {
// A pointer to the decoded module's static representation.
const wasm::WasmModule* module;
- // The function tables are FixedArrays of code used to dispatch indirect
- // calls. (the same length as module.function_tables). We use the address
- // to a global handle to the FixedArray.
+  // The function tables are FixedArrays of <smi, code> pairs used to
+  // signature-check and dispatch indirect calls. The vector has the same
+  // length as module.function_tables. Each entry is the address of a global
+  // handle to the corresponding FixedArray.
const std::vector<Address> function_tables;
- // The signatures tables are FixedArrays of SMIs used to check signatures
- // match at runtime.
- // (the same length as module.function_tables)
- // We use the address to a global handle to the FixedArray.
- const std::vector<Address> signature_tables;
// TODO(mtrofin): remove these 2 once we don't need FLAG_wasm_jit_to_native
// Contains the code objects to call for each direct call.
@@ -69,6 +78,25 @@ struct ModuleEnv {
const std::vector<Handle<Code>> function_code;
// If the default code is not a null handle, always use it for direct calls.
const Handle<Code> default_function_code;
+ // True if trap handling should be used in compiled code, rather than
+ // compiling in bounds checks for each memory access.
+ const bool use_trap_handler;
+
+ ModuleEnv(const wasm::WasmModule* module, Handle<Code> default_function_code,
+ bool use_trap_handler)
+ : module(module),
+ default_function_code(default_function_code),
+ use_trap_handler(use_trap_handler) {}
+
+ ModuleEnv(const wasm::WasmModule* module,
+ std::vector<Address> function_tables,
+ std::vector<Handle<Code>> function_code,
+ Handle<Code> default_function_code, bool use_trap_handler)
+ : module(module),
+ function_tables(std::move(function_tables)),
+ function_code(std::move(function_code)),
+ default_function_code(default_function_code),
+ use_trap_handler(use_trap_handler) {}
};
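The wrapper-compilation call sites in this patch use the new three-argument constructor; the five-argument form additionally carries the table addresses and per-function code objects needed for full module compilation. Mirroring the call sites above:

  // JS-to-wasm wrapper: module known, no dispatch tables needed here.
  ModuleEnv js_to_wasm_env(module, BUILTIN_CODE(isolate, Illegal), use_trap_handler);
  // Wasm-to-JS wrapper: no module at all.
  ModuleEnv wasm_to_js_env(nullptr, Handle<Code>::null(), use_trap_handler);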
enum RuntimeExceptionSupport : bool {
@@ -114,6 +142,11 @@ class WasmCompilationUnit final {
struct LiftoffData {
wasm::LiftoffAssembler asm_;
+ int safepoint_table_offset_;
+ SourcePositionTableBuilder source_position_table_builder_;
+ // The {codegen_zone_} needs to survive until FinishCompilation. It's only
+ // rarely used (e.g. for runtime calls), so it's only allocated when needed.
+ std::unique_ptr<Zone> codegen_zone_;
explicit LiftoffData(Isolate* isolate) : asm_(isolate) {}
};
struct TurbofanData {
@@ -151,7 +184,7 @@ class WasmCompilationUnit final {
size_t memory_cost_ = 0;
wasm::NativeModule* native_module_;
bool lower_simd_;
- std::shared_ptr<std::vector<trap_handler::ProtectedInstructionData>>
+ std::unique_ptr<std::vector<trap_handler::ProtectedInstructionData>>
protected_instructions_;
CompilationMode mode_;
// {liftoff_} is valid if mode_ == kLiftoff, tf_ if mode_ == kTurbofan.
@@ -172,12 +205,13 @@ class WasmCompilationUnit final {
Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
wasm::FunctionSig* sig, uint32_t index,
wasm::ModuleOrigin origin,
+ bool use_trap_handler,
Handle<FixedArray> global_js_imports_table);
// Wraps a given wasm code object, producing a code object.
V8_EXPORT_PRIVATE Handle<Code> CompileJSToWasmWrapper(
Isolate* isolate, wasm::WasmModule* module, WasmCodeWrapper wasm_code,
- uint32_t index, Address wasm_context_address);
+ uint32_t index, Address wasm_context_address, bool use_trap_handler);
// Wraps a wasm function, producing a code object that can be called from other
// wasm instances (the WasmContext address must be changed).
@@ -221,6 +255,8 @@ typedef ZoneVector<Node*> NodeVector;
class WasmGraphBuilder {
public:
enum EnforceBoundsCheck : bool { kNeedsBoundsCheck, kCanOmitBoundsCheck };
+ struct IntConvertOps;
+ struct FloatConvertOps;
WasmGraphBuilder(ModuleEnv* env, Zone* zone, JSGraph* graph,
Handle<Code> centry_stub, wasm::FunctionSig* sig,
@@ -351,7 +387,7 @@ class WasmGraphBuilder {
Node* LoadMem(wasm::ValueType type, MachineType memtype, Node* index,
uint32_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
- Node* StoreMem(MachineType memtype, Node* index, uint32_t offset,
+ Node* StoreMem(MachineRepresentation mem_rep, Node* index, uint32_t offset,
uint32_t alignment, Node* val, wasm::WasmCodePosition position,
wasm::ValueType type);
static void PrintDebugName(Node* node);
@@ -413,36 +449,43 @@ class WasmGraphBuilder {
const wasm::WasmModule* module() { return env_ ? env_->module : nullptr; }
+ bool use_trap_handler() const { return env_ && env_->use_trap_handler; }
+
private:
+ enum class NumericImplementation : uint8_t { kTrap, kSaturate };
static const int kDefaultBufferSize = 16;
- Zone* zone_;
- JSGraph* jsgraph_;
- Node* centry_stub_node_;
- ModuleEnv* env_ = nullptr;
- Node* wasm_context_ = nullptr;
- NodeVector signature_tables_;
- NodeVector function_tables_;
- NodeVector function_table_sizes_;
+ Zone* const zone_;
+ JSGraph* const jsgraph_;
+ Node* const centry_stub_node_;
+  // env_ == nullptr means we are not compiling a Wasm function, e.g. we are
+  // building a wrapper or an interpreter stub instead.
+ ModuleEnv* const env_ = nullptr;
+ SetOncePointer<Node> wasm_context_;
+ struct FunctionTableNodes {
+ Node* table_addr;
+ Node* size;
+ };
+ ZoneVector<FunctionTableNodes> function_tables_;
Node** control_ = nullptr;
Node** effect_ = nullptr;
WasmContextCacheNodes* context_cache_ = nullptr;
- Node* globals_start_ = nullptr;
+ SetOncePointer<Node> globals_start_;
Node** cur_buffer_;
size_t cur_bufsize_;
Node* def_buffer_[kDefaultBufferSize];
bool has_simd_ = false;
bool needs_stack_check_ = false;
- bool untrusted_code_mitigations_ = true;
+ const bool untrusted_code_mitigations_ = true;
// If the runtime doesn't support exception propagation,
// we won't generate stack checks, and trap handling will also
// be generated differently.
- RuntimeExceptionSupport runtime_exception_support_;
+ const RuntimeExceptionSupport runtime_exception_support_;
- wasm::FunctionSig* sig_;
+ wasm::FunctionSig* const sig_;
SetOncePointer<const Operator> allocate_heap_number_operator_;
- compiler::SourcePositionTable* source_position_table_ = nullptr;
+ compiler::SourcePositionTable* const source_position_table_ = nullptr;
// Internal helper methods.
JSGraph* jsgraph() { return jsgraph_; }
@@ -451,11 +494,12 @@ class WasmGraphBuilder {
Node* String(const char* string);
Node* MemBuffer(uint32_t offset);
// BoundsCheckMem receives a uint32 {index} node and returns a ptrsize index.
- Node* BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset,
+ Node* BoundsCheckMem(uint8_t access_size, Node* index, uint32_t offset,
wasm::WasmCodePosition, EnforceBoundsCheck);
+ Node* Uint32ToUintptr(Node*);
const Operator* GetSafeLoadOperator(int offset, wasm::ValueType type);
const Operator* GetSafeStoreOperator(int offset, wasm::ValueType type);
- Node* BuildChangeEndiannessStore(Node* node, MachineType type,
+ Node* BuildChangeEndiannessStore(Node* node, MachineRepresentation rep,
wasm::ValueType wasmtype = wasm::kWasmStmt);
Node* BuildChangeEndiannessLoad(Node* node, MachineType type,
wasm::ValueType wasmtype = wasm::kWasmStmt);
@@ -470,10 +514,25 @@ class WasmGraphBuilder {
Node* BuildF32CopySign(Node* left, Node* right);
Node* BuildF64CopySign(Node* left, Node* right);
- Node* BuildI32SConvertF32(Node* input, wasm::WasmCodePosition position);
- Node* BuildI32SConvertF64(Node* input, wasm::WasmCodePosition position);
- Node* BuildI32UConvertF32(Node* input, wasm::WasmCodePosition position);
- Node* BuildI32UConvertF64(Node* input, wasm::WasmCodePosition position);
+
+ Node* BuildI32ConvertOp(Node* input, wasm::WasmCodePosition position,
+ NumericImplementation impl, const Operator* op,
+ wasm::WasmOpcode check_op,
+ const IntConvertOps* int_ops,
+ const FloatConvertOps* float_ops);
+ Node* BuildConvertCheck(Node* test, Node* result, Node* input,
+ wasm::WasmCodePosition position,
+ NumericImplementation impl,
+ const IntConvertOps* int_ops,
+ const FloatConvertOps* float_ops);
+ Node* BuildI32SConvertF32(Node* input, wasm::WasmCodePosition position,
+ NumericImplementation impl);
+ Node* BuildI32SConvertF64(Node* input, wasm::WasmCodePosition position,
+ NumericImplementation impl);
+ Node* BuildI32UConvertF32(Node* input, wasm::WasmCodePosition position,
+ NumericImplementation impl);
+ Node* BuildI32UConvertF64(Node* input, wasm::WasmCodePosition position,
+ NumericImplementation impl);
Node* BuildI32Ctz(Node* input);
Node* BuildI32Popcnt(Node* input);
Node* BuildI64Ctz(Node* input);
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
index e231d15f10..e7bb3c164a 100644
--- a/deps/v8/src/compiler/wasm-linkage.cc
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -47,7 +47,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == ia32 ===================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS esi, eax, edx, ecx, ebx
-#define GP_RETURN_REGISTERS eax, edx, ecx
+#define GP_RETURN_REGISTERS eax, edx
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2
@@ -56,7 +56,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == x64 ====================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS rsi, rax, rdx, rcx, rbx, rdi
-#define GP_RETURN_REGISTERS rax, rdx, rcx
+#define GP_RETURN_REGISTERS rax, rdx
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2
@@ -65,7 +65,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == arm ====================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS r3, r0, r1, r2
-#define GP_RETURN_REGISTERS r0, r1, r3
+#define GP_RETURN_REGISTERS r0, r1
#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
#define FP_RETURN_REGISTERS d0, d1
@@ -74,7 +74,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == arm64 ====================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS x7, x0, x1, x2, x3, x4, x5, x6
-#define GP_RETURN_REGISTERS x0, x1, x2
+#define GP_RETURN_REGISTERS x0, x1
#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
#define FP_RETURN_REGISTERS d0, d1
@@ -83,7 +83,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == mips ===================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS a0, a1, a2, a3
-#define GP_RETURN_REGISTERS v0, v1, t7
+#define GP_RETURN_REGISTERS v0, v1
#define FP_PARAM_REGISTERS f2, f4, f6, f8, f10, f12, f14
#define FP_RETURN_REGISTERS f2, f4
@@ -92,7 +92,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == mips64 =================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
-#define GP_RETURN_REGISTERS v0, v1, t3
+#define GP_RETURN_REGISTERS v0, v1
#define FP_PARAM_REGISTERS f2, f4, f6, f8, f10, f12, f14
#define FP_RETURN_REGISTERS f2, f4
@@ -101,7 +101,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == ppc & ppc64 ============================================================
// ===========================================================================
#define GP_PARAM_REGISTERS r10, r3, r4, r5, r6, r7, r8, r9
-#define GP_RETURN_REGISTERS r3, r4, r5
+#define GP_RETURN_REGISTERS r3, r4
#define FP_PARAM_REGISTERS d1, d2, d3, d4, d5, d6, d7, d8
#define FP_RETURN_REGISTERS d1, d2
@@ -110,7 +110,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == s390x ==================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS r6, r2, r3, r4, r5
-#define GP_RETURN_REGISTERS r2, r3, r4
+#define GP_RETURN_REGISTERS r2, r3
#define FP_PARAM_REGISTERS d0, d2, d4, d6
#define FP_RETURN_REGISTERS d0, d2, d4, d6
@@ -119,7 +119,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == s390 ===================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS r6, r2, r3, r4, r5
-#define GP_RETURN_REGISTERS r2, r3, r4
+#define GP_RETURN_REGISTERS r2, r3
#define FP_PARAM_REGISTERS d0, d2
#define FP_RETURN_REGISTERS d0, d2
@@ -158,6 +158,8 @@ struct Allocator {
int stack_offset;
+ void AdjustStackOffset(int offset) { stack_offset += offset; }
+
LinkageLocation Next(ValueType type) {
if (IsFloatingPoint(type)) {
// Allocate a floating point register/stack location.
@@ -226,25 +228,28 @@ CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) {
LocationSignature::Builder locations(zone, fsig->return_count(),
fsig->parameter_count() + 1);
- Allocator rets = return_registers;
-
- // Add return location(s).
- const int return_count = static_cast<int>(locations.return_count_);
- for (int i = 0; i < return_count; i++) {
- ValueType ret = fsig->GetReturn(i);
- locations.AddReturn(rets.Next(ret));
- }
-
+ // Add register and/or stack parameter(s).
Allocator params = parameter_registers;
- // Add parameter for the wasm_context.
+ // The wasm_context.
locations.AddParam(params.Next(MachineType::PointerRepresentation()));
- // Add register and/or stack parameter(s).
const int parameter_count = static_cast<int>(fsig->parameter_count());
for (int i = 0; i < parameter_count; i++) {
ValueType param = fsig->GetParam(i);
- locations.AddParam(params.Next(param));
+ auto l = params.Next(param);
+ locations.AddParam(l);
+ }
+
+ // Add return location(s).
+ Allocator rets = return_registers;
+ rets.AdjustStackOffset(params.stack_offset);
+
+ const int return_count = static_cast<int>(locations.return_count_);
+ for (int i = 0; i < return_count; i++) {
+ ValueType ret = fsig->GetReturn(i);
+ auto l = rets.Next(ret);
+ locations.AddReturn(l);
}
const RegList kCalleeSaveRegisters = 0;
@@ -255,22 +260,23 @@ CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) {
: MachineType::AnyTagged();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
- CallDescriptor::Flags flags = CallDescriptor::kUseNativeStack;
CallDescriptor::Kind kind = FLAG_wasm_jit_to_native
? CallDescriptor::kCallWasmFunction
: CallDescriptor::kCallCodeObject;
- return new (zone) CallDescriptor( // --
- kind, // kind
- target_type, // target MachineType
- target_loc, // target location
- locations.Build(), // location_sig
- params.stack_offset, // stack_parameter_count
- compiler::Operator::kNoProperties, // properties
- kCalleeSaveRegisters, // callee-saved registers
- kCalleeSaveFPRegisters, // callee-saved fp regs
- flags, // flags
- "wasm-call");
+ return new (zone) CallDescriptor( // --
+ kind, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ locations.Build(), // location_sig
+ params.stack_offset, // stack_parameter_count
+ compiler::Operator::kNoProperties, // properties
+ kCalleeSaveRegisters, // callee-saved registers
+ kCalleeSaveFPRegisters, // callee-saved fp regs
+ CallDescriptor::kNoFlags, // flags
+ "wasm-call", // debug name
+ 0, // allocatable registers
+ rets.stack_offset - params.stack_offset); // stack_return_count
}
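Stack-allocated return values are now placed after the stack parameters: rets starts at params.stack_offset, and the new trailing CallDescriptor argument records how many extra slots the returns used. For instance, if the parameters spill into 3 stack slots and two returns each need one more slot, params.stack_offset == 3, rets.stack_offset == 5, and the descriptor's stack_return_count is 5 - 3 == 2.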
CallDescriptor* ReplaceTypeInCallDescriptorWith(
@@ -295,21 +301,7 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
LocationSignature::Builder locations(zone, return_count, parameter_count);
- Allocator rets = return_registers;
-
- for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
- if (descriptor->GetReturnType(i) == input_type) {
- for (size_t j = 0; j < num_replacements; j++) {
- locations.AddReturn(rets.Next(output_type));
- }
- } else {
- locations.AddReturn(
- rets.Next(descriptor->GetReturnType(i).representation()));
- }
- }
-
Allocator params = parameter_registers;
-
for (size_t i = 0; i < descriptor->ParameterCount(); i++) {
if (descriptor->GetParameterType(i) == input_type) {
for (size_t j = 0; j < num_replacements; j++) {
@@ -321,17 +313,32 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
}
}
- return new (zone) CallDescriptor( // --
- descriptor->kind(), // kind
- descriptor->GetInputType(0), // target MachineType
- descriptor->GetInputLocation(0), // target location
- locations.Build(), // location_sig
- params.stack_offset, // stack_parameter_count
- descriptor->properties(), // properties
- descriptor->CalleeSavedRegisters(), // callee-saved registers
- descriptor->CalleeSavedFPRegisters(), // callee-saved fp regs
- descriptor->flags(), // flags
- descriptor->debug_name());
+ Allocator rets = return_registers;
+ rets.AdjustStackOffset(params.stack_offset);
+ for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
+ if (descriptor->GetReturnType(i) == input_type) {
+ for (size_t j = 0; j < num_replacements; j++) {
+ locations.AddReturn(rets.Next(output_type));
+ }
+ } else {
+ locations.AddReturn(
+ rets.Next(descriptor->GetReturnType(i).representation()));
+ }
+ }
+
+ return new (zone) CallDescriptor( // --
+ descriptor->kind(), // kind
+ descriptor->GetInputType(0), // target MachineType
+ descriptor->GetInputLocation(0), // target location
+ locations.Build(), // location_sig
+ params.stack_offset, // stack_parameter_count
+ descriptor->properties(), // properties
+ descriptor->CalleeSavedRegisters(), // callee-saved registers
+ descriptor->CalleeSavedFPRegisters(), // callee-saved fp regs
+ descriptor->flags(), // flags
+ descriptor->debug_name(), // debug name
+ descriptor->AllocatableRegisters(), // allocatable registers
+ rets.stack_offset - params.stack_offset); // stack_return_count
}
CallDescriptor* GetI32WasmCallDescriptor(Zone* zone,
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index ea417533f2..bc92f9707c 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -156,18 +156,6 @@ bool HasImmediateInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsImmediate();
}
-
-class OutOfLineLoadZero final : public OutOfLineCode {
- public:
- OutOfLineLoadZero(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ xorl(result_, result_); }
-
- private:
- Register const result_;
-};
-
class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
public:
OutOfLineLoadFloat32NaN(CodeGenerator* gen, XMMRegister result)
@@ -295,7 +283,7 @@ class WasmOutOfLineTrap final : public OutOfLineCode {
ReferenceMap* reference_map = new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
private:
@@ -456,241 +444,6 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
} \
} while (0)
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN) \
- do { \
- auto result = i.OutputDoubleRegister(); \
- auto buffer = i.InputRegister(0); \
- auto index1 = i.InputRegister(1); \
- auto index2 = i.InputUint32(2); \
- OutOfLineCode* ool; \
- if (instr->InputAt(3)->IsRegister()) { \
- auto length = i.InputRegister(3); \
- DCHECK_EQ(0u, index2); \
- __ cmpl(index1, length); \
- ool = new (zone()) OutOfLineLoadNaN(this, result); \
- } else { \
- auto length = i.InputUint32(3); \
- RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmpl(index1, Immediate(length - index2, rmode)); \
- class OutOfLineLoadFloat final : public OutOfLineCode { \
- public: \
- OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
- Register buffer, Register index1, int32_t index2, \
- int32_t length, RelocInfo::Mode rmode) \
- : OutOfLineCode(gen), \
- result_(result), \
- buffer_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- rmode_(rmode) {} \
- \
- void Generate() final { \
- __ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ Pcmpeqd(result_, result_); \
- __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
- __ j(above_equal, exit()); \
- __ asm_instr(result_, \
- Operand(buffer_, kScratchRegister, times_1, 0)); \
- } \
- \
- private: \
- XMMRegister const result_; \
- Register const buffer_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- RelocInfo::Mode rmode_; \
- }; \
- ool = new (zone()) OutOfLineLoadFloat(this, result, buffer, index1, \
- index2, length, rmode); \
- } \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
- __ bind(ool->exit()); \
- } while (false)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- auto buffer = i.InputRegister(0); \
- auto index1 = i.InputRegister(1); \
- auto index2 = i.InputUint32(2); \
- OutOfLineCode* ool; \
- if (instr->InputAt(3)->IsRegister()) { \
- auto length = i.InputRegister(3); \
- DCHECK_EQ(0u, index2); \
- __ cmpl(index1, length); \
- ool = new (zone()) OutOfLineLoadZero(this, result); \
- } else { \
- auto length = i.InputUint32(3); \
- RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmpl(index1, Immediate(length - index2, rmode)); \
- class OutOfLineLoadInteger final : public OutOfLineCode { \
- public: \
- OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
- Register buffer, Register index1, int32_t index2, \
- int32_t length, RelocInfo::Mode rmode) \
- : OutOfLineCode(gen), \
- result_(result), \
- buffer_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- rmode_(rmode) {} \
- \
- void Generate() final { \
- Label oob; \
- __ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
- __ j(above_equal, &oob, Label::kNear); \
- __ asm_instr(result_, \
- Operand(buffer_, kScratchRegister, times_1, 0)); \
- __ jmp(exit()); \
- __ bind(&oob); \
- __ xorl(result_, result_); \
- } \
- \
- private: \
- Register const result_; \
- Register const buffer_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- RelocInfo::Mode const rmode_; \
- }; \
- ool = new (zone()) OutOfLineLoadInteger(this, result, buffer, index1, \
- index2, length, rmode); \
- } \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
- __ bind(ool->exit()); \
- } while (false)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
- do { \
- auto buffer = i.InputRegister(0); \
- auto index1 = i.InputRegister(1); \
- auto index2 = i.InputUint32(2); \
- auto value = i.InputDoubleRegister(4); \
- if (instr->InputAt(3)->IsRegister()) { \
- auto length = i.InputRegister(3); \
- DCHECK_EQ(0u, index2); \
- Label done; \
- __ cmpl(index1, length); \
- __ j(above_equal, &done, Label::kNear); \
- __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
- __ bind(&done); \
- } else { \
- auto length = i.InputUint32(3); \
- RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmpl(index1, Immediate(length - index2, rmode)); \
- class OutOfLineStoreFloat final : public OutOfLineCode { \
- public: \
- OutOfLineStoreFloat(CodeGenerator* gen, Register buffer, \
- Register index1, int32_t index2, int32_t length, \
- XMMRegister value, RelocInfo::Mode rmode) \
- : OutOfLineCode(gen), \
- buffer_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- value_(value), \
- rmode_(rmode) {} \
- \
- void Generate() final { \
- __ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
- __ j(above_equal, exit()); \
- __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
- value_); \
- } \
- \
- private: \
- Register const buffer_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- XMMRegister const value_; \
- RelocInfo::Mode rmode_; \
- }; \
- auto ool = new (zone()) OutOfLineStoreFloat( \
- this, buffer, index1, index2, length, value, rmode); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
- __ bind(ool->exit()); \
- } \
- } while (false)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value) \
- do { \
- auto buffer = i.InputRegister(0); \
- auto index1 = i.InputRegister(1); \
- auto index2 = i.InputUint32(2); \
- if (instr->InputAt(3)->IsRegister()) { \
- auto length = i.InputRegister(3); \
- DCHECK_EQ(0u, index2); \
- Label done; \
- __ cmpl(index1, length); \
- __ j(above_equal, &done, Label::kNear); \
- __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
- __ bind(&done); \
- } else { \
- auto length = i.InputUint32(3); \
- RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmpl(index1, Immediate(length - index2, rmode)); \
- class OutOfLineStoreInteger final : public OutOfLineCode { \
- public: \
- OutOfLineStoreInteger(CodeGenerator* gen, Register buffer, \
- Register index1, int32_t index2, int32_t length, \
- Value value, RelocInfo::Mode rmode) \
- : OutOfLineCode(gen), \
- buffer_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- value_(value), \
- rmode_(rmode) {} \
- \
- void Generate() final { \
- __ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
- __ j(above_equal, exit()); \
- __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
- value_); \
- } \
- \
- private: \
- Register const buffer_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- Value const value_; \
- RelocInfo::Mode rmode_; \
- }; \
- auto ool = new (zone()) OutOfLineStoreInteger( \
- this, buffer, index1, index2, length, value, rmode); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
- __ bind(ool->exit()); \
- } \
- } while (false)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- if (instr->InputAt(4)->IsRegister()) { \
- Register value = i.InputRegister(4); \
- ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register); \
- } else { \
- Immediate value = i.InputImmediate(4); \
- ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
- } \
- } while (false)
-
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
__ PrepareCallCFunction(2); \
@@ -840,6 +593,11 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ j(not_zero, code, RelocInfo::CODE_TARGET);
}
+inline bool HasCallDescriptorFlag(Instruction* instr,
+ CallDescriptor::Flag flag) {
+ return MiscField::decode(instr->opcode()) & flag;
+}
+
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -854,7 +612,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
__ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineCall(reg);
+ } else {
+ __ call(reg);
+ }
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -867,11 +629,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (info()->IsWasm()) {
__ near_call(wasm_code, RelocInfo::WASM_CALL);
} else {
- __ Call(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineCall(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ } else {
+ __ Call(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ }
}
} else {
Register reg = i.InputRegister(0);
- __ call(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineCall(reg);
+ } else {
+ __ call(reg);
+ }
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -890,7 +660,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
__ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineJump(reg);
+ } else {
+ __ jmp(reg);
+ }
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
@@ -909,7 +683,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
} else {
Register reg = i.InputRegister(0);
- __ jmp(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineJump(reg);
+ } else {
+ __ jmp(reg);
+ }
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
@@ -919,7 +697,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTailCallAddress: {
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
- __ jmp(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineJump(reg);
+ } else {
+ __ jmp(reg);
+ }
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -930,7 +712,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
- __ Assert(equal, kWrongFunctionContext);
+ __ Assert(equal, AbortReason::kWrongFunctionContext);
}
__ movp(rcx, FieldOperand(func, JSFunction::kCodeOffset));
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
@@ -1093,6 +875,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(ool->exit());
break;
}
+ case kLFence:
+ __ lfence();
+ break;
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
@@ -2216,22 +2001,41 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->IncreaseSPDelta(1);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kPointerSize);
- } else if (instr->InputAt(0)->IsFPRegister()) {
+ } else if (instr->InputAt(0)->IsFloatRegister() ||
+ instr->InputAt(0)->IsDoubleRegister()) {
// TODO(titzer): use another machine instruction?
__ subq(rsp, Immediate(kDoubleSize));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kDoubleSize);
__ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
- } else {
+ } else if (instr->InputAt(0)->IsSimd128Register()) {
+ // TODO(titzer): use another machine instruction?
+ __ subq(rsp, Immediate(kSimd128Size));
+ frame_access_state()->IncreaseSPDelta(kSimd128Size / kPointerSize);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kSimd128Size);
+ __ Movups(Operand(rsp, 0), i.InputSimd128Register(0));
+ } else if (instr->InputAt(0)->IsStackSlot() ||
+ instr->InputAt(0)->IsFloatStackSlot() ||
+ instr->InputAt(0)->IsDoubleStackSlot()) {
__ pushq(i.InputOperand(0));
frame_access_state()->IncreaseSPDelta(1);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kPointerSize);
+ } else {
+ DCHECK(instr->InputAt(0)->IsSimd128StackSlot());
+ __ Movups(kScratchDoubleReg, i.InputOperand(0));
+ // TODO(titzer): use another machine instruction?
+ __ subq(rsp, Immediate(kSimd128Size));
+ frame_access_state()->IncreaseSPDelta(kSimd128Size / kPointerSize);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kSimd128Size);
+ __ Movups(Operand(rsp, 0), kScratchDoubleReg);
}
break;
case kX64Poke: {
- int const slot = MiscField::decode(instr->opcode());
+ int slot = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
__ movq(Operand(rsp, slot * kPointerSize), i.InputImmediate(0));
} else {
@@ -2239,6 +2043,101 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64Peek: {
+ int reverse_slot = i.InputInt32(0);
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ Movsd(i.OutputDoubleRegister(), Operand(rbp, offset));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ Movss(i.OutputFloatRegister(), Operand(rbp, offset));
+ }
+ } else {
+ __ movq(i.OutputRegister(), Operand(rbp, offset));
+ }
+ break;
+ }
+ // TODO(gdeepti): Get rid of redundant moves for F32x4Splat/Extract below
+ case kX64F32x4Splat: {
+ XMMRegister dst = i.OutputSimd128Register();
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ Movss(dst, i.InputDoubleRegister(0));
+ } else {
+ __ Movss(dst, i.InputOperand(0));
+ }
+ __ shufps(dst, dst, 0x0);
+ break;
+ }
+ case kX64F32x4ExtractLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ extractps(kScratchRegister, i.InputSimd128Register(0), i.InputInt8(1));
+ __ movd(i.OutputDoubleRegister(), kScratchRegister);
+ break;
+ }
+ case kX64F32x4ReplaceLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ // The insertps instruction uses imm8[5:4] to indicate the lane
+ // that needs to be replaced.
+ byte select = i.InputInt8(1) << 4 & 0x30;
+ __ insertps(i.OutputSimd128Register(), i.InputDoubleRegister(2), select);
+ break;
+ }
+ case kX64F32x4RecipApprox: {
+ __ rcpps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kX64F32x4RecipSqrtApprox: {
+ __ rsqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kX64F32x4Add: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ addps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F32x4Sub: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ subps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F32x4Mul: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ mulps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F32x4Min: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ minps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F32x4Max: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ maxps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F32x4Eq: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpps(i.OutputSimd128Register(), i.InputSimd128Register(1), 0x0);
+ break;
+ }
+ case kX64F32x4Ne: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpps(i.OutputSimd128Register(), i.InputSimd128Register(1), 0x4);
+ break;
+ }
+ case kX64F32x4Lt: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpltps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F32x4Le: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpleps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
case kX64I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ movd(dst, i.InputRegister(0));
@@ -2669,48 +2568,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xorps(dst, i.InputSimd128Register(2));
break;
}
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
- break;
- case kCheckedLoadWord64:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movq);
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Movss, OutOfLineLoadFloat32NaN);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Movsd, OutOfLineLoadFloat64NaN);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(movb);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(movw);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(movl);
- break;
- case kCheckedStoreWord64:
- ASSEMBLE_CHECKED_STORE_INTEGER(movq);
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(Movss);
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(Movsd);
- break;
case kX64StackCheck:
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
break;
@@ -2954,7 +2811,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
}
@@ -3082,7 +2939,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
@@ -3124,13 +2981,15 @@ void CodeGenerator::AssembleConstructFrame() {
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
__ bind(&done);
}
- // Skip callee-saved slots, which are pushed below.
+ // Skip callee-saved and return slots, which are created below.
shrink_slots -= base::bits::CountPopulation(saves);
- shrink_slots -= base::bits::CountPopulation(saves_fp);
+ shrink_slots -=
+ base::bits::CountPopulation(saves_fp) * (kQuadWordSize / kPointerSize);
+ shrink_slots -= frame()->GetReturnSlotCount();
if (shrink_slots > 0) {
__ subq(rsp, Immediate(shrink_slots * kPointerSize));
}
@@ -3157,6 +3016,11 @@ void CodeGenerator::AssembleConstructFrame() {
__ pushq(Register::from_code(i));
}
}
+
+ // Allocate return slots (located after callee-saved).
+ if (frame()->GetReturnSlotCount() > 0) {
+ __ subq(rsp, Immediate(frame()->GetReturnSlotCount() * kPointerSize));
+ }
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
@@ -3165,6 +3029,10 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
// Restore registers.
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ __ addq(rsp, Immediate(returns * kPointerSize));
+ }
for (int i = 0; i < Register::kNumRegisters; i++) {
if (!((1 << i) & saves)) continue;
__ popq(Register::from_code(i));
@@ -3212,7 +3080,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
if (pop->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
pop_size += g.ToConstant(pop).ToInt32() * kPointerSize;
CHECK_LT(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
__ Ret(static_cast<int>(pop_size), rcx);
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 9c268ededf..6d9bc6f820 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -56,6 +56,7 @@ namespace compiler {
V(X64Tzcnt32) \
V(X64Popcnt) \
V(X64Popcnt32) \
+ V(LFence) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
V(SSEFloat32Sub) \
@@ -143,7 +144,22 @@ namespace compiler {
V(X64Inc32) \
V(X64Push) \
V(X64Poke) \
+ V(X64Peek) \
V(X64StackCheck) \
+ V(X64F32x4Splat) \
+ V(X64F32x4ExtractLane) \
+ V(X64F32x4ReplaceLane) \
+ V(X64F32x4RecipApprox) \
+ V(X64F32x4RecipSqrtApprox) \
+ V(X64F32x4Add) \
+ V(X64F32x4Sub) \
+ V(X64F32x4Mul) \
+ V(X64F32x4Min) \
+ V(X64F32x4Max) \
+ V(X64F32x4Eq) \
+ V(X64F32x4Ne) \
+ V(X64F32x4Lt) \
+ V(X64F32x4Le) \
V(X64I32x4Splat) \
V(X64I32x4ExtractLane) \
V(X64I32x4ReplaceLane) \
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index ba775e72af..c16fee5861 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -123,6 +123,20 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Lea:
case kX64Dec32:
case kX64Inc32:
+ case kX64F32x4Splat:
+ case kX64F32x4ExtractLane:
+ case kX64F32x4ReplaceLane:
+ case kX64F32x4RecipApprox:
+ case kX64F32x4RecipSqrtApprox:
+ case kX64F32x4Add:
+ case kX64F32x4Sub:
+ case kX64F32x4Mul:
+ case kX64F32x4Min:
+ case kX64F32x4Max:
+ case kX64F32x4Eq:
+ case kX64F32x4Ne:
+ case kX64F32x4Lt:
+ case kX64F32x4Le:
case kX64I32x4Splat:
case kX64I32x4ExtractLane:
case kX64I32x4ReplaceLane:
@@ -240,12 +254,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
case kX64StackCheck:
+ case kX64Peek:
return kIsLoadOperation;
case kX64Push:
case kX64Poke:
return kHasSideEffect;
+ case kLFence:
+ return kHasSideEffect;
+
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
@@ -261,20 +279,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// Basic latency modeling for x64 instructions. They have been determined
// in an empirical way.
switch (instr->arch_opcode()) {
- case kCheckedLoadInt8:
- case kCheckedLoadUint8:
- case kCheckedLoadInt16:
- case kCheckedLoadUint16:
- case kCheckedLoadWord32:
- case kCheckedLoadWord64:
- case kCheckedLoadFloat32:
- case kCheckedLoadFloat64:
- case kCheckedStoreWord8:
- case kCheckedStoreWord16:
- case kCheckedStoreWord32:
- case kCheckedStoreWord64:
- case kCheckedStoreFloat32:
- case kCheckedStoreFloat64:
case kSSEFloat64Mul:
return 5;
case kX64Imul:
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 04fec146de..a0f14c687c 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -109,7 +109,7 @@ class X64OperandGenerator final : public OperandGenerator {
DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
inputs[(*input_count)++] = UseRegister(index);
if (displacement != nullptr) {
- inputs[(*input_count)++] = displacement_mode
+ inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
? UseNegatedImmediate(displacement)
: UseImmediate(displacement);
static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
@@ -289,6 +289,11 @@ void InstructionSelector::VisitDebugAbort(Node* node) {
Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
}
+void InstructionSelector::VisitSpeculationFence(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kLFence, g.NoOutput());
+}
+
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
X64OperandGenerator g(this);
@@ -399,118 +404,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- X64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedLoadWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
- Int32Matcher mlength(length);
- Int32BinopMatcher moffset(offset);
- if (mlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
- g.UseRegister(moffset.left().node()),
- g.UseImmediate(moffset.right().node()), g.UseImmediate(length));
- return;
- }
- }
- InstructionOperand length_operand =
- g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
- g.UseRegister(offset), g.TempImmediate(0), length_operand);
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- X64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedStoreWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand value_operand =
- g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
- if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
- Int32Matcher mlength(length);
- Int32BinopMatcher moffset(offset);
- if (mlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.NoOutput(), g.UseRegister(buffer),
- g.UseRegister(moffset.left().node()),
- g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
- value_operand);
- return;
- }
- }
- InstructionOperand length_operand =
- g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
- g.TempImmediate(0), length_operand, value_operand);
-}
-
-
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
@@ -579,7 +472,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -597,9 +491,9 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitWord32And(Node* node) {
X64OperandGenerator g(this);
Uint32BinopMatcher m(node);
- if (m.right().Is(0xff)) {
+ if (m.right().Is(0xFF)) {
Emit(kX64Movzxbl, g.DefineAsRegister(node), g.Use(m.left().node()));
- } else if (m.right().Is(0xffff)) {
+ } else if (m.right().Is(0xFFFF)) {
Emit(kX64Movzxwl, g.DefineAsRegister(node), g.Use(m.left().node()));
} else {
VisitBinop(this, node, kX64And32);
@@ -823,6 +717,10 @@ bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node,
}
inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
} else {
+ // In the case that the base address was zero, the displacement will be
+ // in a register and replacing it with an immediate is not allowed. This
+ // usually only happens in dead code anyway.
+ if (!inputs[input_count - 1].IsImmediate()) return false;
int32_t displacement = g.GetImmediateIntegerValue(mleft.displacement());
inputs[input_count - 1] =
ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
@@ -1369,6 +1267,7 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
}
RO_OP_LIST(RO_VISITOR)
#undef RO_VISITOR
+#undef RO_OP_LIST
#define RR_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1376,6 +1275,7 @@ RO_OP_LIST(RO_VISITOR)
}
RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR
+#undef RR_OP_LIST
void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
VisitRR(this, node, kArchTruncateDoubleToI);
@@ -1538,11 +1438,11 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
- if (input.node()) {
+ if (input.node) {
int slot = static_cast<int>(n);
- InstructionOperand value = g.CanBeImmediate(input.node())
- ? g.UseImmediate(input.node())
- : g.UseRegister(input.node());
+ InstructionOperand value = g.CanBeImmediate(input.node)
+ ? g.UseImmediate(input.node)
+ : g.UseRegister(input.node);
Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value);
}
}
@@ -1550,31 +1450,55 @@ void InstructionSelector::EmitPrepareArguments(
// Push any stack arguments.
int effect_level = GetEffectLevel(node);
for (PushParameter input : base::Reversed(*arguments)) {
- Node* input_node = input.node();
- if (g.CanBeImmediate(input_node)) {
- Emit(kX64Push, g.NoOutput(), g.UseImmediate(input_node));
+ // Skip any alignment holes in pushed nodes. We may have one in case of a
+ // Simd128 stack argument.
+ if (input.node == nullptr) continue;
+ if (g.CanBeImmediate(input.node)) {
+ Emit(kX64Push, g.NoOutput(), g.UseImmediate(input.node));
} else if (IsSupported(ATOM) ||
- sequence()->IsFP(GetVirtualRegister(input_node))) {
+ sequence()->IsFP(GetVirtualRegister(input.node))) {
// TODO(titzer): X64Push cannot handle stack->stack double moves
// because there is no way to encode fixed double slots.
- Emit(kX64Push, g.NoOutput(), g.UseRegister(input_node));
- } else if (g.CanBeMemoryOperand(kX64Push, node, input_node,
+ Emit(kX64Push, g.NoOutput(), g.UseRegister(input.node));
+ } else if (g.CanBeMemoryOperand(kX64Push, node, input.node,
effect_level)) {
InstructionOperand outputs[1];
InstructionOperand inputs[4];
size_t input_count = 0;
InstructionCode opcode = kX64Push;
AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
- input_node, inputs, &input_count);
+ input.node, inputs, &input_count);
opcode |= AddressingModeField::encode(mode);
Emit(opcode, 0, outputs, input_count, inputs);
} else {
- Emit(kX64Push, g.NoOutput(), g.Use(input_node));
+ Emit(kX64Push, g.NoOutput(), g.Use(input.node));
}
}
}
}
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ X64OperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ reverse_slot += output.location.GetSizeInPointers();
+ // Skip any alignment holes in nodes.
+ if (output.node == nullptr) continue;
+ DCHECK(!descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ InstructionOperand result = g.DefineAsRegister(output.node);
+ InstructionOperand slot = g.UseImmediate(reverse_slot);
+ Emit(kX64Peek, 1, &result, 1, &slot);
+ }
+}
bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
@@ -1602,7 +1526,8 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
InstructionOperand output = g.DefineAsRegister(cont->result());
selector->Emit(opcode, 1, &output, input_count, inputs);
@@ -1624,7 +1549,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1812,7 +1738,8 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()));
} else {
@@ -2012,14 +1939,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2442,16 +2369,21 @@ VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
#define SIMD_TYPES(V) \
+ V(F32x4) \
V(I32x4) \
V(I16x8) \
V(I8x16)
-#define SIMD_FORMAT_LIST(V) \
- V(32x4) \
- V(16x8) \
- V(8x16)
-
#define SIMD_BINOP_LIST(V) \
+ V(F32x4Add) \
+ V(F32x4Sub) \
+ V(F32x4Mul) \
+ V(F32x4Min) \
+ V(F32x4Max) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
V(I32x4Add) \
V(I32x4AddHoriz) \
V(I32x4Sub) \
@@ -2505,6 +2437,8 @@ VISIT_ATOMIC_BINOP(Xor)
V(S128Xor)
#define SIMD_UNOP_LIST(V) \
+ V(F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox) \
V(I32x4Neg) \
V(I16x8Neg) \
V(I8x16Neg) \
@@ -2580,6 +2514,10 @@ SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
}
SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
+#undef SIMD_TYPES
+#undef SIMD_BINOP_LIST
+#undef SIMD_UNOP_LIST
+#undef SIMD_SHIFT_OPCODES
void InstructionSelector::VisitS128Select(Node* node) {
X64OperandGenerator g(this);
@@ -2601,7 +2539,8 @@ MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::kWord32ShiftIsSafe |
- MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz;
+ MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz |
+ MachineOperatorBuilder::kSpeculationFence;
if (CpuFeatures::IsSupported(POPCNT)) {
flags |= MachineOperatorBuilder::kWord32Popcnt |
MachineOperatorBuilder::kWord64Popcnt;
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
index 4a1deb00e2..22e3606e98 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/contexts-inl.h
@@ -24,10 +24,10 @@ ScriptContextTable* ScriptContextTable::cast(Object* context) {
return reinterpret_cast<ScriptContextTable*>(context);
}
-int ScriptContextTable::used() const { return Smi::ToInt(get(kUsedSlot)); }
+int ScriptContextTable::used() const { return Smi::ToInt(get(kUsedSlotIndex)); }
void ScriptContextTable::set_used(int used) {
- set(kUsedSlot, Smi::FromInt(used));
+ set(kUsedSlotIndex, Smi::FromInt(used));
}
@@ -36,7 +36,7 @@ Handle<Context> ScriptContextTable::GetContext(Handle<ScriptContextTable> table,
int i) {
DCHECK(i < table->used());
return Handle<Context>::cast(
- FixedArray::get(*table, i + kFirstContextSlot, table->GetIsolate()));
+ FixedArray::get(*table, i + kFirstContextSlotIndex, table->GetIsolate()));
}
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index bf55b391e7..04c4b4899d 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -19,7 +19,7 @@ Handle<ScriptContextTable> ScriptContextTable::Extend(
int used = table->used();
int length = table->length();
CHECK(used >= 0 && length > 0 && used < length);
- if (used + kFirstContextSlot == length) {
+ if (used + kFirstContextSlotIndex == length) {
CHECK(length < Smi::kMaxValue / 2);
Isolate* isolate = table->GetIsolate();
Handle<FixedArray> copy =
@@ -32,7 +32,7 @@ Handle<ScriptContextTable> ScriptContextTable::Extend(
result->set_used(used + 1);
DCHECK(script_context->IsScriptContext());
- result->set(used + kFirstContextSlot, *script_context);
+ result->set(used + kFirstContextSlotIndex, *script_context);
return result;
}
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 5f8eecb201..c1bca7557e 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -5,7 +5,7 @@
#ifndef V8_CONTEXTS_H_
#define V8_CONTEXTS_H_
-#include "src/objects.h"
+#include "src/objects/fixed-array.h"
namespace v8 {
namespace internal {
@@ -316,6 +316,8 @@ enum ContextLookupFlags {
V(PROXY_CONSTRUCTOR_MAP_INDEX, Map, proxy_constructor_map) \
V(PROXY_FUNCTION_INDEX, JSFunction, proxy_function) \
V(PROXY_MAP_INDEX, Map, proxy_map) \
+ V(PROXY_REVOCABLE_RESULT_MAP_INDEX, Map, proxy_revocable_result_map) \
+ V(PROXY_REVOKE_SHARED_FUN, SharedFunctionInfo, proxy_revoke_shared_fun) \
V(PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN, SharedFunctionInfo, \
promise_get_capabilities_executor_shared_fun) \
V(PROMISE_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
@@ -343,6 +345,7 @@ enum ContextLookupFlags {
V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
V(SELF_WEAK_CELL_INDEX, WeakCell, self_weak_cell) \
+ V(SERIALIZED_OBJECTS, FixedArray, serialized_objects) \
V(SET_VALUE_ITERATOR_MAP_INDEX, Map, set_value_iterator_map) \
V(SET_KEY_VALUE_ITERATOR_MAP_INDEX, Map, set_key_value_iterator_map) \
V(SHARED_ARRAY_BUFFER_FUN_INDEX, JSFunction, shared_array_buffer_fun) \
@@ -450,15 +453,8 @@ class ScriptContextTable : public FixedArray {
static Handle<ScriptContextTable> Extend(Handle<ScriptContextTable> table,
Handle<Context> script_context);
- static int GetContextOffset(int context_index) {
- return kFirstContextOffset + context_index * kPointerSize;
- }
-
- private:
- static const int kUsedSlot = 0;
- static const int kFirstContextSlot = kUsedSlot + 1;
- static const int kFirstContextOffset =
- FixedArray::kHeaderSize + kFirstContextSlot * kPointerSize;
+ static const int kUsedSlotIndex = 0;
+ static const int kFirstContextSlotIndex = 1;
DISALLOW_IMPLICIT_CONSTRUCTORS(ScriptContextTable);
};
@@ -566,6 +562,9 @@ class Context: public FixedArray {
static const int FIRST_FUNCTION_MAP_INDEX = SLOPPY_FUNCTION_MAP_INDEX;
static const int LAST_FUNCTION_MAP_INDEX = CLASS_FUNCTION_MAP_INDEX;
+ static const int kNoContext = 0;
+ static const int kInvalidContext = 1;
+
void ResetErrorsThrown();
void IncrementErrorsThrown();
int GetErrorsThrown();
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index b9be0e097c..c5ea1b8366 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -333,12 +333,12 @@ void StringToIntHelper::DetectRadixInternal(Char current, int length) {
(*current == 'o' || *current == 'O')) {
radix_ = 8;
++current;
- DCHECK(current != end);
+ if (current == end) return set_state(kJunk);
} else if (allow_binary_and_octal_prefixes_ &&
(*current == 'b' || *current == 'B')) {
radix_ = 2;
++current;
- DCHECK(current != end);
+ if (current == end) return set_state(kJunk);
} else {
leading_zero_ = true;
}
@@ -413,7 +413,7 @@ void StringToIntHelper::ParseInternal(Char start) {
// in 32 bits. When we can't guarantee that the next iteration
// will not overflow the multiplier, we stop parsing the part
// by leaving the loop.
- const uint32_t kMaximumMultiplier = 0xffffffffU / 36;
+ const uint32_t kMaximumMultiplier = 0xFFFFFFFFU / 36;
uint32_t m = multiplier * static_cast<uint32_t>(radix_);
if (m > kMaximumMultiplier) break;
part = part * radix_ + d;
@@ -953,6 +953,7 @@ MaybeHandle<BigInt> BigIntParseInt(Isolate* isolate, Handle<String> string,
}
MaybeHandle<BigInt> StringToBigInt(Isolate* isolate, Handle<String> string) {
+ string = String::Flatten(string);
BigIntParseIntHelper helper(isolate, string);
return helper.GetResult();
}
diff --git a/deps/v8/src/counters-inl.h b/deps/v8/src/counters-inl.h
index f085478bf3..abde3a1af5 100644
--- a/deps/v8/src/counters-inl.h
+++ b/deps/v8/src/counters-inl.h
@@ -15,7 +15,7 @@ void RuntimeCallTimer::Start(RuntimeCallCounter* counter,
DCHECK(!IsStarted());
counter_ = counter;
parent_.SetValue(parent);
- if (FLAG_runtime_stats ==
+ if (base::AsAtomic32::Relaxed_Load(&FLAG_runtime_stats) ==
v8::tracing::TracingCategoryObserver::ENABLED_BY_SAMPLING) {
return;
}
@@ -57,8 +57,8 @@ void RuntimeCallTimer::CommitTimeToCounter() {
bool RuntimeCallTimer::IsStarted() { return start_ticks_ != base::TimeTicks(); }
-RuntimeCallTimerScope::RuntimeCallTimerScope(
- HeapObject* heap_object, RuntimeCallStats::CounterId counter_id)
+RuntimeCallTimerScope::RuntimeCallTimerScope(HeapObject* heap_object,
+ RuntimeCallCounterId counter_id)
: RuntimeCallTimerScope(heap_object->GetIsolate(), counter_id) {}
} // namespace internal
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index c754e6fdef..e41fa276a8 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -444,80 +444,46 @@ RuntimeCallStats::RuntimeCallStats() : in_use_(false) {
FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER) //
#undef CALL_BUILTIN_COUNTER
};
- for (int i = 0; i < counters_count; i++) {
- this->*(counters[i]) = RuntimeCallCounter(kNames[i]);
+ for (int i = 0; i < kNumberOfCounters; i++) {
+ this->counters_[i] = RuntimeCallCounter(kNames[i]);
}
}
-// static
-const RuntimeCallStats::CounterId RuntimeCallStats::counters[] = {
-#define CALL_BUILTIN_COUNTER(name) &RuntimeCallStats::GC_##name,
- FOR_EACH_GC_COUNTER(CALL_BUILTIN_COUNTER) //
-#undef CALL_BUILTIN_COUNTER
-#define CALL_RUNTIME_COUNTER(name) &RuntimeCallStats::name,
- FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER) //
-#undef CALL_RUNTIME_COUNTER
-#define CALL_RUNTIME_COUNTER(name, nargs, ressize) \
- &RuntimeCallStats::Runtime_##name, //
- FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER) //
-#undef CALL_RUNTIME_COUNTER
-#define CALL_BUILTIN_COUNTER(name) &RuntimeCallStats::Builtin_##name,
- BUILTIN_LIST_C(CALL_BUILTIN_COUNTER) //
-#undef CALL_BUILTIN_COUNTER
-#define CALL_BUILTIN_COUNTER(name) &RuntimeCallStats::API_##name,
- FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER) //
-#undef CALL_BUILTIN_COUNTER
-#define CALL_BUILTIN_COUNTER(name) &RuntimeCallStats::Handler_##name,
- FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER) //
-#undef CALL_BUILTIN_COUNTER
-};
-
-// static
-const int RuntimeCallStats::counters_count =
- arraysize(RuntimeCallStats::counters);
-
-// static
-void RuntimeCallStats::Enter(RuntimeCallStats* stats, RuntimeCallTimer* timer,
- CounterId counter_id) {
- DCHECK(stats->IsCalledOnTheSameThread());
- RuntimeCallCounter* counter = &(stats->*counter_id);
+void RuntimeCallStats::Enter(RuntimeCallTimer* timer,
+ RuntimeCallCounterId counter_id) {
+ DCHECK(IsCalledOnTheSameThread());
+ RuntimeCallCounter* counter = GetCounter(counter_id);
DCHECK_NOT_NULL(counter->name());
- timer->Start(counter, stats->current_timer());
- stats->current_timer_.SetValue(timer);
- stats->current_counter_.SetValue(counter);
+ timer->Start(counter, current_timer());
+ current_timer_.SetValue(timer);
+ current_counter_.SetValue(counter);
}
-// static
-void RuntimeCallStats::Leave(RuntimeCallStats* stats, RuntimeCallTimer* timer) {
- DCHECK(stats->IsCalledOnTheSameThread());
- RuntimeCallTimer* stack_top = stats->current_timer();
+void RuntimeCallStats::Leave(RuntimeCallTimer* timer) {
+ DCHECK(IsCalledOnTheSameThread());
+ RuntimeCallTimer* stack_top = current_timer();
if (stack_top == nullptr) return; // Missing timer is a result of Reset().
CHECK(stack_top == timer);
- stats->current_timer_.SetValue(timer->Stop());
- RuntimeCallTimer* cur_timer = stats->current_timer();
- stats->current_counter_.SetValue(cur_timer ? cur_timer->counter() : nullptr);
+ current_timer_.SetValue(timer->Stop());
+ RuntimeCallTimer* cur_timer = current_timer();
+ current_counter_.SetValue(cur_timer ? cur_timer->counter() : nullptr);
}
void RuntimeCallStats::Add(RuntimeCallStats* other) {
- for (const RuntimeCallStats::CounterId counter_id :
- RuntimeCallStats::counters) {
- RuntimeCallCounter* counter = &(this->*counter_id);
- RuntimeCallCounter* other_counter = &(other->*counter_id);
- counter->Add(other_counter);
+ for (int i = 0; i < kNumberOfCounters; i++) {
+ GetCounter(i)->Add(other->GetCounter(i));
}
}
// static
-void RuntimeCallStats::CorrectCurrentCounterId(RuntimeCallStats* stats,
- CounterId counter_id) {
- DCHECK(stats->IsCalledOnTheSameThread());
- // When RCS are enabled dynamically there might be no stats or timer set up.
- if (stats == nullptr) return;
- RuntimeCallTimer* timer = stats->current_timer_.Value();
+void RuntimeCallStats::CorrectCurrentCounterId(
+ RuntimeCallCounterId counter_id) {
+ DCHECK(IsCalledOnTheSameThread());
+ RuntimeCallTimer* timer = current_timer();
if (timer == nullptr) return;
- RuntimeCallCounter* counter = &(stats->*counter_id);
+ RuntimeCallCounter* counter = GetCounter(counter_id);
timer->set_counter(counter);
- stats->current_counter_.SetValue(counter);
+ current_counter_.SetValue(counter);
}
bool RuntimeCallStats::IsCalledOnTheSameThread() {
@@ -537,10 +503,8 @@ void RuntimeCallStats::Print(std::ostream& os) {
if (current_timer_.Value() != nullptr) {
current_timer_.Value()->Snapshot();
}
- for (const RuntimeCallStats::CounterId counter_id :
- RuntimeCallStats::counters) {
- RuntimeCallCounter* counter = &(this->*counter_id);
- entries.Add(counter);
+ for (int i = 0; i < kNumberOfCounters; i++) {
+ entries.Add(GetCounter(i));
}
entries.Print(os);
}
@@ -556,22 +520,17 @@ void RuntimeCallStats::Reset() {
current_timer_.SetValue(current_timer_.Value()->Stop());
}
- for (const RuntimeCallStats::CounterId counter_id :
- RuntimeCallStats::counters) {
- RuntimeCallCounter* counter = &(this->*counter_id);
- counter->Reset();
+ for (int i = 0; i < kNumberOfCounters; i++) {
+ GetCounter(i)->Reset();
}
in_use_ = true;
}
void RuntimeCallStats::Dump(v8::tracing::TracedValue* value) {
- for (const RuntimeCallStats::CounterId counter_id :
- RuntimeCallStats::counters) {
- RuntimeCallCounter* counter = &(this->*counter_id);
- if (counter->count() > 0) counter->Dump(value);
+ for (int i = 0; i < kNumberOfCounters; i++) {
+ if (GetCounter(i)->count() > 0) GetCounter(i)->Dump(value);
}
-
in_use_ = false;
}
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 56873db092..b3c6f8c8ff 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -579,6 +579,7 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
class RuntimeCallCounter final {
public:
+ RuntimeCallCounter() : RuntimeCallCounter(nullptr) {}
explicit RuntimeCallCounter(const char* name)
: name_(name), count_(0), time_(0) {}
V8_NOINLINE void Reset();
@@ -596,8 +597,6 @@ class RuntimeCallCounter final {
private:
friend class RuntimeCallStats;
- RuntimeCallCounter() {}
-
const char* name_;
int64_t count_;
// Stored as int64_t so that its initialization can be deferred.
@@ -634,7 +633,9 @@ class RuntimeCallTimer final {
base::TimeDelta elapsed_;
};
-#define FOR_EACH_GC_COUNTER(V) TRACER_SCOPES(V)
+#define FOR_EACH_GC_COUNTER(V) \
+ TRACER_SCOPES(V) \
+ TRACER_BACKGROUND_SCOPES(V)
#define FOR_EACH_API_COUNTER(V) \
V(ArrayBuffer_Cast) \
@@ -785,8 +786,7 @@ class RuntimeCallTimer final {
#define FOR_EACH_MANUAL_COUNTER(V) \
V(AccessorGetterCallback) \
- V(AccessorNameGetterCallback) \
- V(AccessorNameSetterCallback) \
+ V(AccessorSetterCallback) \
V(ArrayLengthGetter) \
V(ArrayLengthSetter) \
V(BoundFunctionNameGetter) \
@@ -821,18 +821,21 @@ class RuntimeCallTimer final {
V(GC_Custom_SlowAllocateRaw) \
V(GCEpilogueCallback) \
V(GCPrologueCallback) \
- V(GenericNamedPropertyDefinerCallback) \
- V(GenericNamedPropertyDeleterCallback) \
- V(GenericNamedPropertyDescriptorCallback) \
- V(GenericNamedPropertyQueryCallback) \
- V(GenericNamedPropertySetterCallback) \
V(GetMoreDataCallback) \
- V(IndexedPropertyDefinerCallback) \
- V(IndexedPropertyDeleterCallback) \
- V(IndexedPropertyDescriptorCallback) \
- V(IndexedPropertyGetterCallback) \
- V(IndexedPropertyQueryCallback) \
- V(IndexedPropertySetterCallback) \
+ V(NamedDefinerCallback) \
+ V(NamedDeleterCallback) \
+ V(NamedDescriptorCallback) \
+ V(NamedQueryCallback) \
+ V(NamedSetterCallback) \
+ V(NamedGetterCallback) \
+ V(NamedEnumeratorCallback) \
+ V(IndexedDefinerCallback) \
+ V(IndexedDeleterCallback) \
+ V(IndexedDescriptorCallback) \
+ V(IndexedGetterCallback) \
+ V(IndexedQueryCallback) \
+ V(IndexedSetterCallback) \
+ V(IndexedEnumeratorCallback) \
V(InvokeApiInterruptCallbacks) \
V(InvokeFunctionCallback) \
V(JS_Execution) \
@@ -878,6 +881,8 @@ class RuntimeCallTimer final {
V(KeyedStoreIC_SlowStub) \
V(KeyedStoreIC_StoreFastElementStub) \
V(KeyedStoreIC_StoreElementStub) \
+ V(LoadGlobalIC_LoadScriptContextField) \
+ V(LoadGlobalIC_SlowStub) \
V(LoadIC_FunctionPrototypeStub) \
V(LoadIC_HandlerCacheHit_Accessor) \
V(LoadIC_LoadAccessorDH) \
@@ -899,12 +904,13 @@ class RuntimeCallTimer final {
V(LoadIC_LoadNonexistentDH) \
V(LoadIC_LoadNormalDH) \
V(LoadIC_LoadNormalFromPrototypeDH) \
- V(LoadIC_LoadScriptContextFieldStub) \
V(LoadIC_NonReceiver) \
V(LoadIC_Premonomorphic) \
V(LoadIC_SlowStub) \
V(LoadIC_StringLength) \
V(LoadIC_StringWrapperLength) \
+ V(StoreGlobalIC_StoreScriptContextField) \
+ V(StoreGlobalIC_SlowStub) \
V(StoreIC_HandlerCacheHit_Accessor) \
V(StoreIC_NonReceiver) \
V(StoreIC_Premonomorphic) \
@@ -919,53 +925,48 @@ class RuntimeCallTimer final {
V(StoreIC_StoreNativeDataPropertyDH) \
V(StoreIC_StoreNativeDataPropertyOnPrototypeDH) \
V(StoreIC_StoreNormalDH) \
- V(StoreIC_StoreScriptContextFieldStub) \
V(StoreIC_StoreTransitionDH)
-class RuntimeCallStats final : public ZoneObject {
- public:
- typedef RuntimeCallCounter RuntimeCallStats::*CounterId;
- V8_EXPORT_PRIVATE RuntimeCallStats();
-
-#define CALL_RUNTIME_COUNTER(name) RuntimeCallCounter GC_##name;
+enum RuntimeCallCounterId {
+#define CALL_RUNTIME_COUNTER(name) kGC_##name,
FOR_EACH_GC_COUNTER(CALL_RUNTIME_COUNTER)
#undef CALL_RUNTIME_COUNTER
-#define CALL_RUNTIME_COUNTER(name) RuntimeCallCounter name;
- FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER)
+#define CALL_RUNTIME_COUNTER(name) k##name,
+ FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER)
#undef CALL_RUNTIME_COUNTER
-#define CALL_RUNTIME_COUNTER(name, nargs, ressize) \
- RuntimeCallCounter Runtime_##name;
- FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER)
+#define CALL_RUNTIME_COUNTER(name, nargs, ressize) kRuntime_##name,
+ FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER)
#undef CALL_RUNTIME_COUNTER
-#define CALL_BUILTIN_COUNTER(name) RuntimeCallCounter Builtin_##name;
- BUILTIN_LIST_C(CALL_BUILTIN_COUNTER)
+#define CALL_BUILTIN_COUNTER(name) kBuiltin_##name,
+ BUILTIN_LIST_C(CALL_BUILTIN_COUNTER)
#undef CALL_BUILTIN_COUNTER
-#define CALL_BUILTIN_COUNTER(name) RuntimeCallCounter API_##name;
- FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER)
+#define CALL_BUILTIN_COUNTER(name) kAPI_##name,
+ FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER)
#undef CALL_BUILTIN_COUNTER
-#define CALL_BUILTIN_COUNTER(name) RuntimeCallCounter Handler_##name;
- FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER)
+#define CALL_BUILTIN_COUNTER(name) kHandler_##name,
+ FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER)
#undef CALL_BUILTIN_COUNTER
+ kNumberOfCounters
+};
- static const CounterId counters[];
- static const int counters_count;
+class RuntimeCallStats final : public ZoneObject {
+ public:
+ V8_EXPORT_PRIVATE RuntimeCallStats();
// Starting measuring the time for a function. This will establish the
// connection to the parent counter for properly calculating the own times.
- V8_EXPORT_PRIVATE static void Enter(RuntimeCallStats* stats,
- RuntimeCallTimer* timer,
- CounterId counter_id);
+ V8_EXPORT_PRIVATE void Enter(RuntimeCallTimer* timer,
+ RuntimeCallCounterId counter_id);
// Leave a scope for a measured runtime function. This will properly add
// the time delta to the current_counter and subtract the delta from its
// parent.
- V8_EXPORT_PRIVATE static void Leave(RuntimeCallStats* stats,
- RuntimeCallTimer* timer);
+ V8_EXPORT_PRIVATE void Leave(RuntimeCallTimer* timer);
// Set counter id for the innermost measurement. It can be used to refine
// event kind when a runtime entry counter is too generic.
- V8_EXPORT_PRIVATE static void CorrectCurrentCounterId(RuntimeCallStats* stats,
- CounterId counter_id);
+ V8_EXPORT_PRIVATE void CorrectCurrentCounterId(
+ RuntimeCallCounterId counter_id);
V8_EXPORT_PRIVATE void Reset();
// Add all entries from another stats object.
@@ -980,6 +981,15 @@ class RuntimeCallStats final : public ZoneObject {
bool InUse() { return in_use_; }
bool IsCalledOnTheSameThread();
+ static const int kNumberOfCounters =
+ static_cast<int>(RuntimeCallCounterId::kNumberOfCounters);
+ RuntimeCallCounter* GetCounter(RuntimeCallCounterId counter_id) {
+ return &counters_[static_cast<int>(counter_id)];
+ }
+ RuntimeCallCounter* GetCounter(int counter_id) {
+ return &counters_[counter_id];
+ }
+
private:
// Top of a stack of active timers.
base::AtomicValue<RuntimeCallTimer*> current_timer_;
@@ -988,40 +998,41 @@ class RuntimeCallStats final : public ZoneObject {
// Used to track nested tracing scopes.
bool in_use_;
ThreadId thread_id_;
+ RuntimeCallCounter counters_[kNumberOfCounters];
};
-#define CHANGE_CURRENT_RUNTIME_COUNTER(runtime_call_stats, counter_name) \
- do { \
- if (V8_UNLIKELY(FLAG_runtime_stats)) { \
- RuntimeCallStats::CorrectCurrentCounterId( \
- runtime_call_stats, &RuntimeCallStats::counter_name); \
- } \
+#define CHANGE_CURRENT_RUNTIME_COUNTER(runtime_call_stats, counter_id) \
+ do { \
+ if (V8_UNLIKELY(FLAG_runtime_stats) && runtime_call_stats) { \
+ runtime_call_stats->CorrectCurrentCounterId(counter_id); \
+ } \
} while (false)
-#define TRACE_HANDLER_STATS(isolate, counter_name) \
- CHANGE_CURRENT_RUNTIME_COUNTER(isolate->counters()->runtime_call_stats(), \
- Handler_##counter_name)
+#define TRACE_HANDLER_STATS(isolate, counter_name) \
+ CHANGE_CURRENT_RUNTIME_COUNTER( \
+ isolate->counters()->runtime_call_stats(), \
+ RuntimeCallCounterId::kHandler_##counter_name)
// A RuntimeCallTimerScope wraps around a RuntimeCallTimer to measure the
// time of a C++ scope.
class RuntimeCallTimerScope {
public:
inline RuntimeCallTimerScope(Isolate* isolate,
- RuntimeCallStats::CounterId counter_id);
+ RuntimeCallCounterId counter_id);
// This constructor is here just to avoid calling GetIsolate() when the
// stats are disabled and the isolate is not directly available.
inline RuntimeCallTimerScope(HeapObject* heap_object,
- RuntimeCallStats::CounterId counter_id);
+ RuntimeCallCounterId counter_id);
inline RuntimeCallTimerScope(RuntimeCallStats* stats,
- RuntimeCallStats::CounterId counter_id) {
+ RuntimeCallCounterId counter_id) {
if (V8_LIKELY(!FLAG_runtime_stats || stats == nullptr)) return;
stats_ = stats;
- RuntimeCallStats::Enter(stats_, &timer_, counter_id);
+ stats_->Enter(&timer_, counter_id);
}
inline ~RuntimeCallTimerScope() {
if (V8_UNLIKELY(stats_ != nullptr)) {
- RuntimeCallStats::Leave(stats_, &timer_);
+ stats_->Leave(&timer_);
}
}
@@ -1034,6 +1045,9 @@ class RuntimeCallTimerScope {
#define HISTOGRAM_RANGE_LIST(HR) \
/* Generic range histograms: HR(name, caption, min, max, num_buckets) */ \
+ HR(background_marking, V8.GCBackgroundMarking, 0, 10000, 101) \
+ HR(background_scavenger, V8.GCBackgroundScavenger, 0, 10000, 101) \
+ HR(background_sweeping, V8.GCBackgroundSweeping, 0, 10000, 101) \
HR(detached_context_age_in_gc, V8.DetachedContextAgeInGC, 0, 20, 21) \
HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6) \
HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20) \
@@ -1131,6 +1145,8 @@ class RuntimeCallTimerScope {
1000000, MICROSECOND) \
HT(wasm_compile_wasm_function_time, V8.WasmCompileFunctionMicroSeconds.wasm, \
1000000, MICROSECOND) \
+ HT(liftoff_compile_time, V8.LiftoffCompileMicroSeconds, 10000000, \
+ MICROSECOND) \
HT(wasm_instantiate_wasm_module_time, \
V8.WasmInstantiateModuleMicroSeconds.wasm, 10000000, MICROSECOND) \
HT(wasm_instantiate_asm_module_time, \
@@ -1384,28 +1400,6 @@ class Counters : public std::enable_shared_from_this<Counters> {
STATS_COUNTER_TS_LIST(SC)
#undef SC
-#define SC(name) \
- StatsCounter* count_of_##name() { return &count_of_##name##_; } \
- StatsCounter* size_of_##name() { return &size_of_##name##_; }
- INSTANCE_TYPE_LIST(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter* count_of_CODE_TYPE_##name() \
- { return &count_of_CODE_TYPE_##name##_; } \
- StatsCounter* size_of_CODE_TYPE_##name() \
- { return &size_of_CODE_TYPE_##name##_; }
- CODE_KIND_LIST(SC)
-#undef SC
-
-#define SC(name) \
- StatsCounter* count_of_FIXED_ARRAY_##name() \
- { return &count_of_FIXED_ARRAY_##name##_; } \
- StatsCounter* size_of_FIXED_ARRAY_##name() \
- { return &size_of_FIXED_ARRAY_##name##_; }
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
-#undef SC
-
// clang-format off
enum Id {
#define RATE_ID(name, caption, max, res) k_##name,
@@ -1541,11 +1535,11 @@ void HistogramTimer::Stop() {
TimedHistogram::Stop(&timer_, counters()->isolate());
}
-RuntimeCallTimerScope::RuntimeCallTimerScope(
- Isolate* isolate, RuntimeCallStats::CounterId counter_id) {
+RuntimeCallTimerScope::RuntimeCallTimerScope(Isolate* isolate,
+ RuntimeCallCounterId counter_id) {
if (V8_LIKELY(!FLAG_runtime_stats)) return;
stats_ = isolate->counters()->runtime_call_stats();
- RuntimeCallStats::Enter(stats_, &timer_, counter_id);
+ stats_->Enter(&timer_, counter_id);
}
} // namespace internal
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc
index 8836fdb0e5..3aae30799f 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8-posix.cc
@@ -31,16 +31,16 @@ static int LengthWithoutIncompleteUtf8(char* buffer, int len) {
static const int kUtf8SingleByteMask = 0x80;
static const int kUtf8SingleByteValue = 0x00;
// 2-byte encoding.
- static const int kUtf8TwoByteMask = 0xe0;
- static const int kUtf8TwoByteValue = 0xc0;
+ static const int kUtf8TwoByteMask = 0xE0;
+ static const int kUtf8TwoByteValue = 0xC0;
// 3-byte encoding.
- static const int kUtf8ThreeByteMask = 0xf0;
- static const int kUtf8ThreeByteValue = 0xe0;
+ static const int kUtf8ThreeByteMask = 0xF0;
+ static const int kUtf8ThreeByteValue = 0xE0;
// 4-byte encoding.
- static const int kUtf8FourByteMask = 0xf8;
- static const int kUtf8FourByteValue = 0xf0;
+ static const int kUtf8FourByteMask = 0xF8;
+ static const int kUtf8FourByteValue = 0xF0;
// Subsequent bytes of a multi-byte encoding.
- static const int kMultiByteMask = 0xc0;
+ static const int kMultiByteMask = 0xC0;
static const int kMultiByteValue = 0x80;
int multi_byte_bytes_seen = 0;
while (answer > 0) {
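
The mask/value pairs above classify UTF-8 lead and continuation bytes. A small self-contained sketch of how they are meant to be applied; the helper names are illustrative and not part of the patch.

  // A byte starts a 2-byte sequence iff (b & 0xE0) == 0xC0, a 3-byte sequence
  // iff (b & 0xF0) == 0xE0, and is a continuation byte iff (b & 0xC0) == 0x80.
  inline bool IsTwoByteLead(unsigned char b) { return (b & 0xE0) == 0xC0; }
  inline bool IsThreeByteLead(unsigned char b) { return (b & 0xF0) == 0xE0; }
  inline bool IsContinuationByte(unsigned char b) { return (b & 0xC0) == 0x80; }
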
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 54a41fc00e..32f129821a 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -41,10 +41,6 @@
#include "src/utils.h"
#include "src/v8.h"
-#if defined(LEAK_SANITIZER)
-#include <sanitizer/lsan_interface.h>
-#endif
-
#if !defined(_WIN32) && !defined(_WIN64)
#include <unistd.h> // NOLINT
#else
@@ -66,136 +62,128 @@ namespace v8 {
namespace {
-const int MB = 1024 * 1024;
-const int kMaxWorkers = 50;
-const int kMaxSerializerMemoryUsage = 1 * MB; // Arbitrary maximum for testing.
+const int kMB = 1024 * 1024;
-#define USE_VM 1
-#define VM_THRESHOLD 65536
-// TODO(titzer): allocations should fail if >= 2gb because of
-// array buffers storing the lengths as a SMI internally.
-#define TWO_GB (2u * 1024u * 1024u * 1024u)
+const int kMaxWorkers = 50;
+const int kMaxSerializerMemoryUsage =
+ 1 * kMB; // Arbitrary maximum for testing.
-// Forwards memory reservation and protection functions to the V8 default
-// allocator. Used by ShellArrayBufferAllocator and MockArrayBufferAllocator.
+// Base class for shell ArrayBuffer allocators. It forwards all operations to
+// the default V8 allocator.
class ArrayBufferAllocatorBase : public v8::ArrayBuffer::Allocator {
- std::unique_ptr<Allocator> allocator_ =
- std::unique_ptr<Allocator>(NewDefaultAllocator());
-
public:
- void* Reserve(size_t length) override { return allocator_->Reserve(length); }
+ void* Allocate(size_t length) override {
+ return allocator_->Allocate(length);
+ }
- void Free(void*, size_t) override = 0;
+ void* AllocateUninitialized(size_t length) override {
+ return allocator_->AllocateUninitialized(length);
+ }
+
+ void Free(void* data, size_t length) override {
+ allocator_->Free(data, length);
+ }
+
+ void* Reserve(size_t length) override { return allocator_->Reserve(length); }
void Free(void* data, size_t length, AllocationMode mode) override {
- switch (mode) {
- case AllocationMode::kNormal: {
- return Free(data, length);
- }
- case AllocationMode::kReservation: {
- return allocator_->Free(data, length, mode);
- }
- }
+ allocator_->Free(data, length, mode);
}
void SetProtection(void* data, size_t length,
Protection protection) override {
allocator_->SetProtection(data, length, protection);
}
+
+ private:
+ std::unique_ptr<Allocator> allocator_ =
+ std::unique_ptr<Allocator>(NewDefaultAllocator());
};
+// ArrayBuffer allocator that can use virtual memory to improve performance.
class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
public:
void* Allocate(size_t length) override {
-#if USE_VM
- if (RoundToPageSize(&length)) {
- void* data = VirtualMemoryAllocate(length);
-#if DEBUG
- if (data) {
- // In debug mode, check the memory is zero-initialized.
- size_t limit = length / sizeof(uint64_t);
- uint64_t* ptr = reinterpret_cast<uint64_t*>(data);
- for (size_t i = 0; i < limit; i++) {
- DCHECK_EQ(0u, ptr[i]);
- }
- }
-#endif
- return data;
- }
-#endif
- void* data = AllocateUninitialized(length);
- return data == nullptr ? data : memset(data, 0, length);
+ if (length >= kVMThreshold) return AllocateVM(length);
+ return ArrayBufferAllocatorBase::Allocate(length);
}
+
void* AllocateUninitialized(size_t length) override {
-#if USE_VM
- if (RoundToPageSize(&length)) return VirtualMemoryAllocate(length);
-#endif
-// Work around for GCC bug on AIX
-// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839
-#if V8_OS_AIX && _LINUX_SOURCE_COMPAT
- return __linux_malloc(length);
-#else
- return malloc(length);
-#endif
+ if (length >= kVMThreshold) return AllocateVM(length);
+ return ArrayBufferAllocatorBase::AllocateUninitialized(length);
}
- using ArrayBufferAllocatorBase::Free;
+
void Free(void* data, size_t length) override {
-#if USE_VM
- if (RoundToPageSize(&length)) {
- CHECK(base::OS::Free(data, length));
- return;
+ if (length >= kVMThreshold) {
+ FreeVM(data, length);
+ } else {
+ ArrayBufferAllocatorBase::Free(data, length);
}
-#endif
- free(data);
}
- // If {length} is at least {VM_THRESHOLD}, round up to next page size and
- // return {true}. Otherwise return {false}.
- bool RoundToPageSize(size_t* length) {
- size_t page_size = base::OS::AllocatePageSize();
- if (*length >= VM_THRESHOLD && *length < TWO_GB) {
- *length = RoundUp(*length, page_size);
- return true;
- }
- return false;
+
+ void* Reserve(size_t length) override {
+ // |length| must be at least the VM threshold so we can distinguish
+ // VM-backed memory from malloc'ed memory.
+ DCHECK_LE(kVMThreshold, length);
+ return ArrayBufferAllocatorBase::Reserve(length);
}
-#if USE_VM
- void* VirtualMemoryAllocate(size_t length) {
- size_t page_size = base::OS::AllocatePageSize();
- size_t alloc_size = RoundUp(length, page_size);
- void* address = base::OS::Allocate(nullptr, alloc_size, page_size,
- base::OS::MemoryPermission::kReadWrite);
- if (address != nullptr) {
-#if defined(LEAK_SANITIZER)
- __lsan_register_root_region(address, alloc_size);
-#endif
- MSAN_MEMORY_IS_INITIALIZED(address, alloc_size);
- }
- return address;
+
+ void Free(void* data, size_t length, AllocationMode) override {
+ // Ignore allocation mode; the appropriate action is determined by |length|.
+ Free(data, length);
}
-#endif
-};
-class MockArrayBufferAllocator : public ArrayBufferAllocatorBase {
- const size_t kAllocationLimit = 10 * MB;
- size_t get_actual_length(size_t length) const {
- return length > kAllocationLimit ? base::OS::AllocatePageSize() : length;
+ private:
+ static constexpr size_t kVMThreshold = 65536;
+ static constexpr size_t kTwoGB = 2u * 1024u * 1024u * 1024u;
+
+ void* AllocateVM(size_t length) {
+ DCHECK_LE(kVMThreshold, length);
+ // TODO(titzer): allocations should fail if >= 2 GB because array buffers
+ // store their lengths as a Smi internally.
+ if (length >= kTwoGB) return nullptr;
+
+ size_t page_size = i::AllocatePageSize();
+ size_t allocated = RoundUp(length, page_size);
+ // Rounding up could go over the limit.
+ if (allocated >= kTwoGB) return nullptr;
+ return i::AllocatePages(nullptr, allocated, page_size,
+ PageAllocator::kReadWrite);
}
- public:
+ void FreeVM(void* data, size_t length) {
+ size_t page_size = i::AllocatePageSize();
+ size_t allocated = RoundUp(length, page_size);
+ CHECK(i::FreePages(data, allocated));
+ }
+};
+
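A hedged usage sketch for the allocator above, assuming it runs inside d8 after platform initialization so the default backing allocator is available; the 100-byte and 1 MB sizes are illustrative only.

  ShellArrayBufferAllocator allocator;
  void* small = allocator.Allocate(100);      // below kVMThreshold: malloc path
  void* large = allocator.Allocate(1 * kMB);  // >= kVMThreshold: VM-backed pages
  allocator.Free(small, 100);
  allocator.Free(large, 1 * kMB);             // |length| routes this to FreeVM
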
+// ArrayBuffer allocator that never allocates over 10MB.
+class MockArrayBufferAllocator : public ArrayBufferAllocatorBase {
void* Allocate(size_t length) override {
- const size_t actual_length = get_actual_length(length);
- void* data = AllocateUninitialized(actual_length);
- return data == nullptr ? data : memset(data, 0, actual_length);
+ return ArrayBufferAllocatorBase::Allocate(Adjust(length));
}
+
void* AllocateUninitialized(size_t length) override {
- return malloc(get_actual_length(length));
+ return ArrayBufferAllocatorBase::AllocateUninitialized(Adjust(length));
}
- void Free(void* p, size_t) override { free(p); }
- void Free(void* data, size_t length, AllocationMode mode) override {
- ArrayBufferAllocatorBase::Free(data, get_actual_length(length), mode);
+
+ void Free(void* data, size_t length) override {
+ return ArrayBufferAllocatorBase::Free(data, Adjust(length));
}
+
void* Reserve(size_t length) override {
- return ArrayBufferAllocatorBase::Reserve(get_actual_length(length));
+ return ArrayBufferAllocatorBase::Reserve(Adjust(length));
+ }
+
+ void Free(void* data, size_t length, AllocationMode mode) override {
+ return ArrayBufferAllocatorBase::Free(data, Adjust(length), mode);
+ }
+
+ private:
+ size_t Adjust(size_t length) {
+ const size_t kAllocationLimit = 10 * kMB;
+ return length > kAllocationLimit ? i::AllocatePageSize() : length;
}
};
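
To make the clamping behaviour of Adjust() concrete, a standalone restatement under an assumed 4 KiB page size; kMB and the 10 MB limit come from this patch, while kPageSize merely stands in for i::AllocatePageSize().

  #include <cstddef>
  constexpr size_t kMB = 1024 * 1024;
  constexpr size_t kPageSize = 4096;  // assumed host page size
  constexpr size_t Adjust(size_t length) {
    return length > 10 * kMB ? kPageSize : length;
  }
  static_assert(Adjust(1 * kMB) == 1 * kMB, "small requests pass through");
  static_assert(Adjust(64 * kMB) == kPageSize,
                "oversized requests collapse to a single page");
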
@@ -209,6 +197,18 @@ class PredictablePlatform : public Platform {
DCHECK_NOT_NULL(platform_);
}
+ PageAllocator* GetPageAllocator() override {
+ return platform_->GetPageAllocator();
+ }
+
+ void OnCriticalMemoryPressure() override {
+ platform_->OnCriticalMemoryPressure();
+ }
+
+ bool OnCriticalMemoryPressure(size_t length) override {
+ return platform_->OnCriticalMemoryPressure(length);
+ }
+
std::shared_ptr<TaskRunner> GetForegroundTaskRunner(
v8::Isolate* isolate) override {
return platform_->GetForegroundTaskRunner(isolate);
@@ -300,7 +300,7 @@ base::Thread::Options GetThreadOptions(const char* name) {
// which is not enough to parse the big literal expressions used in tests.
// The stack size should be at least StackGuard::kLimitSize + some
// OS-specific padding for thread startup code. 2Mbytes seems to be enough.
- return base::Thread::Options(name, 2 * MB);
+ return base::Thread::Options(name, 2 * kMB);
}
} // namespace
@@ -506,6 +506,9 @@ std::vector<Worker*> Shell::workers_;
std::vector<ExternalizedContents> Shell::externalized_contents_;
base::LazyMutex Shell::isolate_status_lock_;
std::map<v8::Isolate*, bool> Shell::isolate_status_;
+base::LazyMutex Shell::cached_code_mutex_;
+std::map<std::string, std::unique_ptr<ScriptCompiler::CachedData>>
+ Shell::cached_code_map_;
Global<Context> Shell::evaluation_context_;
ArrayBuffer::Allocator* Shell::array_buffer_allocator;
@@ -566,95 +569,39 @@ class BackgroundCompileThread : public base::Thread {
std::unique_ptr<v8::ScriptCompiler::ScriptStreamingTask> task_;
};
-ScriptCompiler::CachedData* CompileForCachedData(
- Local<String> source, Local<Value> name,
- ScriptCompiler::CompileOptions compile_options) {
- int source_length = source->Length();
- uint16_t* source_buffer = new uint16_t[source_length];
- source->Write(source_buffer, 0, source_length);
- int name_length = 0;
- uint16_t* name_buffer = nullptr;
- if (name->IsString()) {
- Local<String> name_string = Local<String>::Cast(name);
- name_length = name_string->Length();
- name_buffer = new uint16_t[name_length];
- name_string->Write(name_buffer, 0, name_length);
+ScriptCompiler::CachedData* Shell::LookupCodeCache(Isolate* isolate,
+ Local<Value> source) {
+ base::LockGuard<base::Mutex> lock_guard(cached_code_mutex_.Pointer());
+ CHECK(source->IsString());
+ v8::String::Utf8Value key(isolate, source);
+ DCHECK(*key);
+ auto entry = cached_code_map_.find(*key);
+ if (entry != cached_code_map_.end() && entry->second) {
+ int length = entry->second->length;
+ uint8_t* cache = new uint8_t[length];
+ memcpy(cache, entry->second->data, length);
+ ScriptCompiler::CachedData* cached_data = new ScriptCompiler::CachedData(
+ cache, length, ScriptCompiler::CachedData::BufferOwned);
+ return cached_data;
}
- Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = Shell::array_buffer_allocator;
- i::FLAG_hash_seed ^= 1337; // Use a different hash seed.
- Isolate* temp_isolate = Isolate::New(create_params);
- i::FLAG_hash_seed ^= 1337; // Restore old hash seed.
- temp_isolate->SetHostImportModuleDynamicallyCallback(
- Shell::HostImportModuleDynamically);
- temp_isolate->SetHostInitializeImportMetaObjectCallback(
- Shell::HostInitializeImportMetaObject);
- ScriptCompiler::CachedData* result = nullptr;
- {
- Isolate::Scope isolate_scope(temp_isolate);
- HandleScope handle_scope(temp_isolate);
- Context::Scope context_scope(Context::New(temp_isolate));
- Local<String> source_copy =
- v8::String::NewFromTwoByte(temp_isolate, source_buffer,
- v8::NewStringType::kNormal, source_length)
- .ToLocalChecked();
- Local<Value> name_copy;
- if (name_buffer) {
- name_copy =
- v8::String::NewFromTwoByte(temp_isolate, name_buffer,
- v8::NewStringType::kNormal, name_length)
- .ToLocalChecked();
- } else {
- name_copy = v8::Undefined(temp_isolate);
- }
- ScriptCompiler::Source script_source(source_copy, ScriptOrigin(name_copy));
- if (!ScriptCompiler::CompileUnboundScript(temp_isolate, &script_source,
- compile_options)
- .IsEmpty() &&
- script_source.GetCachedData()) {
- int length = script_source.GetCachedData()->length;
- uint8_t* cache = new uint8_t[length];
- memcpy(cache, script_source.GetCachedData()->data, length);
- result = new ScriptCompiler::CachedData(
- cache, length, ScriptCompiler::CachedData::BufferOwned);
- }
- }
- temp_isolate->Dispose();
- delete[] source_buffer;
- delete[] name_buffer;
- return result;
+ return nullptr;
}
-
-// Compile a string within the current v8 context.
-MaybeLocal<Script> Shell::CompileString(
- Isolate* isolate, Local<String> source, Local<Value> name,
- ScriptCompiler::CompileOptions compile_options) {
- Local<Context> context(isolate->GetCurrentContext());
- ScriptOrigin origin(name);
- if (compile_options == ScriptCompiler::kNoCompileOptions) {
- ScriptCompiler::Source script_source(source, origin);
- return ScriptCompiler::Compile(context, &script_source, compile_options);
- }
-
- ScriptCompiler::CachedData* data =
- CompileForCachedData(source, name, compile_options);
- ScriptCompiler::Source cached_source(source, origin, data);
- if (compile_options == ScriptCompiler::kProduceCodeCache) {
- compile_options = ScriptCompiler::kConsumeCodeCache;
- } else if (compile_options == ScriptCompiler::kProduceParserCache) {
- compile_options = ScriptCompiler::kConsumeParserCache;
- } else {
- DCHECK(false); // A new compile option?
- }
- if (data == nullptr) compile_options = ScriptCompiler::kNoCompileOptions;
- MaybeLocal<Script> result =
- ScriptCompiler::Compile(context, &cached_source, compile_options);
- CHECK(data == nullptr || !data->rejected);
- return result;
+void Shell::StoreInCodeCache(Isolate* isolate, Local<Value> source,
+ const ScriptCompiler::CachedData* cache_data) {
+ base::LockGuard<base::Mutex> lock_guard(cached_code_mutex_.Pointer());
+ CHECK(source->IsString());
+ if (cache_data == nullptr) return;
+ v8::String::Utf8Value key(isolate, source);
+ DCHECK(*key);
+ int length = cache_data->length;
+ uint8_t* cache = new uint8_t[length];
+ memcpy(cache, cache_data->data, length);
+ cached_code_map_[*key] = std::unique_ptr<ScriptCompiler::CachedData>(
+ new ScriptCompiler::CachedData(cache, length,
+ ScriptCompiler::CachedData::BufferOwned));
}
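
A hedged sketch of how the lookup/store pair above is intended to be used; it mirrors the ExecuteString() changes further down, and script, source, origin, context and maybe_script are assumed to be the local names from that function.

  // First run: compile, then serialize the unbound script into the cache.
  ScriptCompiler::CachedData* cached_data =
      ScriptCompiler::CreateCodeCache(script->GetUnboundScript(), source);
  StoreInCodeCache(isolate, source, cached_data);
  delete cached_data;  // StoreInCodeCache keeps its own copy.

  // Later run: consume the cached data if an entry is still present.
  if (ScriptCompiler::CachedData* cached = LookupCodeCache(isolate, source)) {
    ScriptCompiler::Source cached_source(source, origin, cached);
    maybe_script = ScriptCompiler::Compile(context, &cached_source,
                                           ScriptCompiler::kConsumeCodeCache);
  }
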
-
// Executes a string within the current v8 context.
bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
Local<Value> name, bool print_result,
@@ -671,7 +618,24 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
Local<Context>::New(isolate, data->realms_[data->realm_current_]);
Context::Scope context_scope(realm);
MaybeLocal<Script> maybe_script;
- if (options.stress_background_compile) {
+ Local<Context> context(isolate->GetCurrentContext());
+ ScriptOrigin origin(name);
+
+ if (options.compile_options == ScriptCompiler::kConsumeCodeCache ||
+ options.compile_options == ScriptCompiler::kConsumeParserCache) {
+ ScriptCompiler::CachedData* cached_code =
+ LookupCodeCache(isolate, source);
+ if (cached_code != nullptr) {
+ ScriptCompiler::Source script_source(source, origin, cached_code);
+ maybe_script = ScriptCompiler::Compile(context, &script_source,
+ options.compile_options);
+ CHECK(!cached_code->rejected);
+ } else {
+ ScriptCompiler::Source script_source(source, origin);
+ maybe_script = ScriptCompiler::Compile(
+ context, &script_source, ScriptCompiler::kNoCompileOptions);
+ }
+ } else if (options.stress_background_compile) {
// Start a background thread compiling the script.
BackgroundCompileThread background_compile_thread(isolate, source);
background_compile_thread.Start();
@@ -679,18 +643,22 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
// In parallel, compile on the main thread to flush out any data races.
{
TryCatch ignore_try_catch(isolate);
- Shell::CompileString(isolate, source, name, options.compile_options);
+ ScriptCompiler::Source script_source(source, origin);
+ USE(ScriptCompiler::Compile(context, &script_source,
+ ScriptCompiler::kNoCompileOptions));
}
// Join with background thread and finalize compilation.
background_compile_thread.Join();
- ScriptOrigin origin(name);
maybe_script = v8::ScriptCompiler::Compile(
- isolate->GetCurrentContext(),
- background_compile_thread.streamed_source(), source, origin);
+ context, background_compile_thread.streamed_source(), source, origin);
} else {
- maybe_script =
- Shell::CompileString(isolate, source, name, options.compile_options);
+ ScriptCompiler::Source script_source(source, origin);
+ maybe_script = ScriptCompiler::Compile(context, &script_source,
+ options.compile_options);
+ if (options.compile_options == ScriptCompiler::kProduceParserCache) {
+ StoreInCodeCache(isolate, source, script_source.GetCachedData());
+ }
}
Local<Script> script;
@@ -700,7 +668,23 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
return false;
}
+ if (options.code_cache_options ==
+ ShellOptions::CodeCacheOptions::kProduceCache) {
+ // Serialize and store it in memory for the next execution.
+ ScriptCompiler::CachedData* cached_data =
+ ScriptCompiler::CreateCodeCache(script->GetUnboundScript(), source);
+ StoreInCodeCache(isolate, source, cached_data);
+ delete cached_data;
+ }
maybe_result = script->Run(realm);
+ if (options.code_cache_options ==
+ ShellOptions::CodeCacheOptions::kProduceCacheAfterExecute) {
+ // Serialize and store it in memory for the next execution.
+ ScriptCompiler::CachedData* cached_data =
+ ScriptCompiler::CreateCodeCache(script->GetUnboundScript(), source);
+ StoreInCodeCache(isolate, source, cached_data);
+ delete cached_data;
+ }
if (!EmptyMessageQueues(isolate)) success = false;
data->realm_current_ = data->realm_switch_;
}
@@ -2292,7 +2276,7 @@ Local<String> Shell::ReadFile(Isolate* isolate, const char* name) {
char* chars = ReadChars(name, &size);
if (chars == nullptr) return Local<String>();
Local<String> result;
- if (i::FLAG_use_external_strings && internal::String::IsAscii(chars, size)) {
+ if (i::FLAG_use_external_strings && i::String::IsAscii(chars, size)) {
String::ExternalOneByteStringResource* resource =
new ExternalOwningOneByteStringResource(
std::unique_ptr<const char[]>(chars), size);
@@ -2557,11 +2541,11 @@ void SourceGroup::ExecuteInThread() {
Shell::options.enable_inspector);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
Execute(isolate);
+ Shell::CompleteMessageLoop(isolate);
}
DisposeModuleEmbedderData(context);
}
Shell::CollectGarbage(isolate);
- Shell::CompleteMessageLoop(isolate);
}
done_semaphore_.Signal();
}
@@ -2591,7 +2575,9 @@ void SourceGroup::JoinThread() {
}
ExternalizedContents::~ExternalizedContents() {
- Shell::array_buffer_allocator->Free(data_, size_);
+ if (base_ != nullptr) {
+ Shell::array_buffer_allocator->Free(base_, length_, mode_);
+ }
}
void SerializationDataQueue::Enqueue(std::unique_ptr<SerializationData> data) {
@@ -2863,11 +2849,23 @@ bool Shell::SetOptions(int argc, char* argv[]) {
strncmp(argv[i], "--cache=", 8) == 0) {
const char* value = argv[i] + 7;
if (!*value || strncmp(value, "=code", 6) == 0) {
- options.compile_options = v8::ScriptCompiler::kProduceCodeCache;
+ options.compile_options = v8::ScriptCompiler::kNoCompileOptions;
+ options.code_cache_options =
+ ShellOptions::CodeCacheOptions::kProduceCache;
} else if (strncmp(value, "=parse", 7) == 0) {
options.compile_options = v8::ScriptCompiler::kProduceParserCache;
} else if (strncmp(value, "=none", 6) == 0) {
options.compile_options = v8::ScriptCompiler::kNoCompileOptions;
+ options.code_cache_options =
+ ShellOptions::CodeCacheOptions::kNoProduceCache;
+ } else if (strncmp(value, "=after-execute", 15) == 0) {
+ options.compile_options = v8::ScriptCompiler::kNoCompileOptions;
+ options.code_cache_options =
+ ShellOptions::CodeCacheOptions::kProduceCacheAfterExecute;
+ } else if (strncmp(value, "=full-code-cache", 17) == 0) {
+ options.compile_options = v8::ScriptCompiler::kEagerCompile;
+ options.code_cache_options =
+ ShellOptions::CodeCacheOptions::kProduceCache;
} else {
printf("Unknown option to --cache.\n");
return false;
@@ -2876,6 +2874,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--enable-tracing") == 0) {
options.trace_enabled = true;
argv[i] = nullptr;
+ } else if (strncmp(argv[i], "--trace-path=", 13) == 0) {
+ options.trace_path = argv[i] + 13;
+ argv[i] = nullptr;
} else if (strncmp(argv[i], "--trace-config=", 15) == 0) {
options.trace_config = argv[i] + 15;
argv[i] = nullptr;
@@ -2956,6 +2957,7 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
InspectorClient inspector_client(context, options.enable_inspector);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
options.isolate_sources[0].Execute(isolate);
+ CompleteMessageLoop(isolate);
}
if (!use_existing_context) {
DisposeModuleEmbedderData(context);
@@ -2963,7 +2965,6 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
WriteLcovData(isolate, options.lcov_file);
}
CollectGarbage(isolate);
- CompleteMessageLoop(isolate);
for (int i = 1; i < options.num_isolates; ++i) {
if (last_run) {
options.isolate_sources[i].JoinThread();
@@ -3306,7 +3307,8 @@ int Shell::Main(int argc, char* argv[]) {
std::unique_ptr<platform::tracing::TracingController> tracing;
if (options.trace_enabled && !i::FLAG_verify_predictable) {
tracing = base::make_unique<platform::tracing::TracingController>();
- trace_file.open("v8_trace.json");
+
+ trace_file.open(options.trace_path ? options.trace_path : "v8_trace.json");
platform::tracing::TraceBuffer* trace_buffer =
platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(
platform::tracing::TraceBuffer::kRingBufferChunks,
@@ -3356,7 +3358,7 @@ int Shell::Main(int argc, char* argv[]) {
create_params.add_histogram_sample_callback = AddHistogramSample;
}
- if (i::trap_handler::UseTrapHandler()) {
+ if (i::trap_handler::IsTrapHandlerEnabled()) {
if (!v8::V8::RegisterDefaultSignalHandler()) {
fprintf(stderr, "Could not register signal handler");
exit(1);
@@ -3413,6 +3415,42 @@ int Shell::Main(int argc, char* argv[]) {
bool last_run = i == options.stress_runs - 1;
result = RunMain(isolate, argc, argv, last_run);
}
+ } else if (options.code_cache_options !=
+ ShellOptions::CodeCacheOptions::kNoProduceCache) {
+ printf("============ Run: Produce code cache ============\n");
+ // First run to produce the cache
+ result = RunMain(isolate, argc, argv, false);
+
+ // Change the options to consume the cache
+ if (options.compile_options == v8::ScriptCompiler::kProduceParserCache) {
+ options.compile_options = v8::ScriptCompiler::kConsumeParserCache;
+ } else {
+ DCHECK(options.compile_options == v8::ScriptCompiler::kEagerCompile ||
+ options.compile_options ==
+ v8::ScriptCompiler::kNoCompileOptions);
+ options.compile_options = v8::ScriptCompiler::kConsumeCodeCache;
+ }
+
+ printf("============ Run: Consume code cache ============\n");
+ // Second run, consuming the cache in a new isolate
+ Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = Shell::array_buffer_allocator;
+ i::FLAG_hash_seed ^= 1337; // Use a different hash seed.
+ Isolate* isolate2 = Isolate::New(create_params);
+ i::FLAG_hash_seed ^= 1337; // Restore old hash seed.
+ isolate2->SetHostImportModuleDynamicallyCallback(
+ Shell::HostImportModuleDynamically);
+ isolate2->SetHostInitializeImportMetaObjectCallback(
+ Shell::HostInitializeImportMetaObject);
+ {
+ D8Console console(isolate2);
+ debug::SetConsoleDelegate(isolate2, &console);
+ PerIsolateData data(isolate2);
+ Isolate::Scope isolate_scope(isolate2);
+
+ result = RunMain(isolate2, argc, argv, true);
+ }
+ isolate2->Dispose();
} else {
bool last_run = true;
result = RunMain(isolate, argc, argv, last_run);
@@ -3430,6 +3468,7 @@ int Shell::Main(int argc, char* argv[]) {
}
// Shut down contexts and collect garbage.
+ cached_code_map_.clear();
evaluation_context_.Reset();
stringify_function_.Reset();
CollectGarbage(isolate);
@@ -3438,6 +3477,9 @@ int Shell::Main(int argc, char* argv[]) {
V8::Dispose();
V8::ShutdownPlatform();
+ // Delete the platform explicitly here to write the tracing output to the
+ // tracing file.
+ g_platform.reset();
return result;
}
@@ -3449,3 +3491,6 @@ int main(int argc, char* argv[]) {
return v8::Shell::Main(argc, argv);
}
#endif
+
+#undef CHECK
+#undef DCHECK
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index c699d91d68..8fc6eab046 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -149,28 +149,36 @@ class SourceGroup {
class ExternalizedContents {
public:
explicit ExternalizedContents(const ArrayBuffer::Contents& contents)
- : data_(contents.Data()), size_(contents.ByteLength()) {}
+ : base_(contents.AllocationBase()),
+ length_(contents.AllocationLength()),
+ mode_(contents.AllocationMode()) {}
explicit ExternalizedContents(const SharedArrayBuffer::Contents& contents)
- : data_(contents.Data()), size_(contents.ByteLength()) {}
+ : base_(contents.AllocationBase()),
+ length_(contents.AllocationLength()),
+ mode_(contents.AllocationMode()) {}
ExternalizedContents(ExternalizedContents&& other)
- : data_(other.data_), size_(other.size_) {
- other.data_ = nullptr;
- other.size_ = 0;
+ : base_(other.base_), length_(other.length_), mode_(other.mode_) {
+ other.base_ = nullptr;
+ other.length_ = 0;
+ other.mode_ = ArrayBuffer::Allocator::AllocationMode::kNormal;
}
ExternalizedContents& operator=(ExternalizedContents&& other) {
if (this != &other) {
- data_ = other.data_;
- size_ = other.size_;
- other.data_ = nullptr;
- other.size_ = 0;
+ base_ = other.base_;
+ length_ = other.length_;
+ mode_ = other.mode_;
+ other.base_ = nullptr;
+ other.length_ = 0;
+ other.mode_ = ArrayBuffer::Allocator::AllocationMode::kNormal;
}
return *this;
}
~ExternalizedContents();
private:
- void* data_;
- size_t size_;
+ void* base_;
+ size_t length_;
+ ArrayBuffer::Allocator::AllocationMode mode_;
DISALLOW_COPY_AND_ASSIGN(ExternalizedContents);
};
@@ -280,6 +288,12 @@ class Worker {
class ShellOptions {
public:
+ enum CodeCacheOptions {
+ kNoProduceCache,
+ kProduceCache,
+ kProduceCacheAfterExecute
+ };
+
ShellOptions()
: script_executed(false),
send_idle_notification(false),
@@ -296,11 +310,13 @@ class ShellOptions {
num_isolates(1),
compile_options(v8::ScriptCompiler::kNoCompileOptions),
stress_background_compile(false),
+ code_cache_options(CodeCacheOptions::kNoProduceCache),
isolate_sources(nullptr),
icu_data_file(nullptr),
natives_blob(nullptr),
snapshot_blob(nullptr),
trace_enabled(false),
+ trace_path(nullptr),
trace_config(nullptr),
lcov_file(nullptr),
disable_in_process_stack_traces(false),
@@ -329,11 +345,13 @@ class ShellOptions {
int num_isolates;
v8::ScriptCompiler::CompileOptions compile_options;
bool stress_background_compile;
+ CodeCacheOptions code_cache_options;
SourceGroup* isolate_sources;
const char* icu_data_file;
const char* natives_blob;
const char* snapshot_blob;
bool trace_enabled;
+ const char* trace_path;
const char* trace_config;
const char* lcov_file;
bool disable_in_process_stack_traces;
@@ -344,9 +362,6 @@ class ShellOptions {
class Shell : public i::AllStatic {
public:
- static MaybeLocal<Script> CompileString(
- Isolate* isolate, Local<String> source, Local<Value> name,
- v8::ScriptCompiler::CompileOptions compile_options);
static bool ExecuteString(Isolate* isolate, Local<String> source,
Local<Value> name, bool print_result,
bool report_exceptions);
@@ -504,10 +519,18 @@ class Shell : public i::AllStatic {
int index);
static MaybeLocal<Module> FetchModuleTree(v8::Local<v8::Context> context,
const std::string& file_name);
+ static ScriptCompiler::CachedData* LookupCodeCache(Isolate* isolate,
+ Local<Value> name);
+ static void StoreInCodeCache(Isolate* isolate, Local<Value> name,
+ const ScriptCompiler::CachedData* data);
// We may have multiple isolates running concurrently, so the access to
// the isolate_status_ needs to be concurrency-safe.
static base::LazyMutex isolate_status_lock_;
static std::map<Isolate*, bool> isolate_status_;
+
+ static base::LazyMutex cached_code_mutex_;
+ static std::map<std::string, std::unique_ptr<ScriptCompiler::CachedData>>
+ cached_code_map_;
};
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index d53a6fdc4e..3eae96aa11 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -344,6 +344,16 @@ bool IsBlockMode(debug::Coverage::Mode mode) {
}
}
+bool IsBinaryMode(debug::Coverage::Mode mode) {
+ switch (mode) {
+ case debug::Coverage::kBlockBinary:
+ case debug::Coverage::kPreciseBinary:
+ return true;
+ default:
+ return false;
+ }
+}
+
void CollectBlockCoverage(Isolate* isolate, CoverageFunction* function,
SharedFunctionInfo* info,
debug::Coverage::Mode mode) {
@@ -535,14 +545,29 @@ void Coverage::SelectMode(Isolate* isolate, debug::Coverage::Mode mode) {
case debug::Coverage::kPreciseBinary:
case debug::Coverage::kPreciseCount: {
HandleScope scope(isolate);
+
// Remove all optimized functions. Optimized and inlined functions do not
// increment the invocation count.
Deoptimizer::DeoptimizeAll(isolate);
- if (isolate->factory()
- ->feedback_vectors_for_profiling_tools()
- ->IsUndefined(isolate)) {
- isolate->InitializeVectorListFromHeap();
+
+ // Root all feedback vectors to avoid early collection.
+ isolate->MaybeInitializeVectorListFromHeap();
+
+ HeapIterator heap_iterator(isolate->heap());
+ while (HeapObject* o = heap_iterator.next()) {
+ if (IsBinaryMode(mode) && o->IsSharedFunctionInfo()) {
+ // If collecting binary coverage, reset
+ // SFI::has_reported_binary_coverage to avoid optimizing / inlining
+ // functions before they have reported coverage.
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(o);
+ shared->set_has_reported_binary_coverage(false);
+ } else if (o->IsFeedbackVector()) {
+ // In any case, clear any collected invocation counts.
+ FeedbackVector* vector = FeedbackVector::cast(o);
+ vector->clear_invocation_count();
+ }
}
+
break;
}
}
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index b6e3f14ed1..33bc81e5f7 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -16,6 +16,7 @@
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecodes.h"
#include "src/isolate-inl.h"
+#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
@@ -58,13 +59,6 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
if (!it.is_javascript()) return isolate->factory()->undefined_value();
JavaScriptFrame* frame = it.javascript_frame();
- // Traverse the saved contexts chain to find the active context for the
- // selected frame.
- SaveContext* save =
- DebugFrameHelper::FindSavedContextForFrame(isolate, frame);
- SaveContext savex(isolate);
- isolate->set_context(*(save->context()));
-
// This is not a lot different than DebugEvaluate::Global, except that
// variables accessible by the function we are evaluating from are
// materialized and included on top of the native context. Changes to
@@ -284,7 +278,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ToString) \
V(ToLength) \
V(ToNumber) \
- V(NumberToString) \
+ V(NumberToStringSkipCache) \
/* Type checks */ \
V(IsJSReceiver) \
V(IsSmi) \
@@ -349,7 +343,11 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(AllocateSeqOneByteString) \
V(AllocateSeqTwoByteString) \
V(ObjectCreate) \
+ V(ObjectEntries) \
+ V(ObjectEntriesSkipFastPath) \
V(ObjectHasOwnProperty) \
+ V(ObjectValues) \
+ V(ObjectValuesSkipFastPath) \
V(ArrayIndexOf) \
V(ArrayIncludes_Slow) \
V(ArrayIsArray) \
@@ -361,6 +359,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ThrowRangeError) \
V(ToName) \
V(GetOwnPropertyDescriptor) \
+ V(StackGuard) \
/* Misc. */ \
V(Call) \
V(MaxSmi) \
@@ -522,6 +521,8 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kArrayPrototypeValues:
case Builtins::kArrayIncludes:
case Builtins::kArrayPrototypeEntries:
+ case Builtins::kArrayPrototypeFind:
+ case Builtins::kArrayPrototypeFindIndex:
case Builtins::kArrayPrototypeKeys:
case Builtins::kArrayForEach:
case Builtins::kArrayEvery:
@@ -751,16 +752,29 @@ bool DebugEvaluate::FunctionHasNoSideEffect(Handle<SharedFunctionInfo> info) {
? info->lazy_deserialization_builtin_id()
: info->code()->builtin_index();
DCHECK_NE(Builtins::kDeserializeLazy, builtin_index);
- if (builtin_index >= 0 && builtin_index < Builtins::builtin_count &&
+ if (Builtins::IsBuiltinId(builtin_index) &&
BuiltinHasNoSideEffect(static_cast<Builtins::Name>(builtin_index))) {
#ifdef DEBUG
- if (info->code()->builtin_index() == Builtins::kDeserializeLazy) {
- return true; // Target builtin is not yet deserialized.
+ Isolate* isolate = info->GetIsolate();
+ Code* code = isolate->builtins()->builtin(builtin_index);
+ if (code->builtin_index() == Builtins::kDeserializeLazy) {
+ // Target builtin is not yet deserialized. Deserialize it now.
+
+ DCHECK(Builtins::IsLazy(builtin_index));
+ DCHECK_EQ(Builtins::TFJ, Builtins::KindOf(builtin_index));
+
+ if (FLAG_trace_lazy_deserialization) {
+ PrintF("Lazy-deserializing builtin %s\n",
+ Builtins::name(builtin_index));
+ }
+
+ code = Snapshot::DeserializeBuiltin(isolate, builtin_index);
+ DCHECK_NE(Builtins::kDeserializeLazy, code->builtin_index());
}
// TODO(yangguo): Check builtin-to-builtin calls too.
int mode = RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE);
bool failed = false;
- for (RelocIterator it(info->code(), mode); !it.done(); it.next()) {
+ for (RelocIterator it(code, mode); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
Address address = rinfo->target_external_reference();
const Runtime::Function* function = Runtime::FunctionForEntry(address);
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index 9b669ea096..6b4f8c23f6 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -9,15 +9,11 @@
#include "src/frames.h"
#include "src/isolate.h"
#include "src/objects.h"
+#include "src/wasm/wasm-interpreter.h"
namespace v8 {
namespace internal {
-// Forward declaration:
-namespace wasm {
-class InterpretedFrame;
-}
-
class FrameInspector {
public:
FrameInspector(StandardFrame* frame, int inlined_frame_index,
@@ -61,7 +57,7 @@ class FrameInspector {
StandardFrame* frame_;
std::unique_ptr<DeoptimizedFrameInfo> deoptimized_frame_;
- std::unique_ptr<wasm::InterpretedFrame> wasm_interpreted_frame_;
+ wasm::WasmInterpreter::FramePtr wasm_interpreted_frame_;
Isolate* isolate_;
Handle<Script> script_;
Handle<Object> receiver_;
diff --git a/deps/v8/src/debug/debug-type-profile.cc b/deps/v8/src/debug/debug-type-profile.cc
index c89849e350..6288c11b94 100644
--- a/deps/v8/src/debug/debug-type-profile.cc
+++ b/deps/v8/src/debug/debug-type-profile.cc
@@ -105,10 +105,7 @@ void TypeProfile::SelectMode(Isolate* isolate, debug::TypeProfile::Mode mode) {
}
} else {
DCHECK_EQ(debug::TypeProfile::Mode::kCollect, mode);
- if (isolate->factory()->feedback_vectors_for_profiling_tools()->IsUndefined(
- isolate)) {
- isolate->InitializeVectorListFromHeap();
- }
+ isolate->MaybeInitializeVectorListFromHeap();
}
isolate->set_type_profile_mode(mode);
}
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 78cb102fa8..c087a0868c 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -1782,13 +1782,13 @@ void Debug::RunPromiseHook(PromiseHookType hook_type, Handle<JSPromise> promise,
if (*code == *BUILTIN_CODE(isolate_, AsyncFunctionPromiseCreate)) {
type = debug::kDebugAsyncFunctionPromiseCreated;
last_frame_was_promise_builtin = true;
- } else if (*code == *BUILTIN_CODE(isolate_, PromiseThen)) {
+ } else if (*code == *BUILTIN_CODE(isolate_, PromisePrototypeThen)) {
type = debug::kDebugPromiseThen;
last_frame_was_promise_builtin = true;
- } else if (*code == *BUILTIN_CODE(isolate_, PromiseCatch)) {
+ } else if (*code == *BUILTIN_CODE(isolate_, PromisePrototypeCatch)) {
type = debug::kDebugPromiseCatch;
last_frame_was_promise_builtin = true;
- } else if (*code == *BUILTIN_CODE(isolate_, PromiseFinally)) {
+ } else if (*code == *BUILTIN_CODE(isolate_, PromisePrototypeFinally)) {
type = debug::kDebugPromiseFinally;
last_frame_was_promise_builtin = true;
}
@@ -2147,7 +2147,7 @@ bool Debug::PerformSideEffectCheck(Handle<JSFunction> function) {
return false;
}
Deoptimizer::DeoptimizeFunction(*function);
- if (!function->shared()->HasNoSideEffect()) {
+ if (!SharedFunctionInfo::HasNoSideEffect(handle(function->shared()))) {
if (FLAG_trace_side_effect_free_debug_evaluate) {
PrintF("[debug-evaluate] Function %s failed side effect check.\n",
function->shared()->DebugName()->ToCString().get());
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 9180608b21..a2b22d58d4 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -840,7 +840,7 @@ void LiveEdit::ReplaceFunctionCode(
}
shared_info->set_scope_info(new_shared_info->scope_info());
shared_info->set_outer_scope_info(new_shared_info->outer_scope_info());
- shared_info->DisableOptimization(kLiveEdit);
+ shared_info->DisableOptimization(BailoutReason::kLiveEdit);
// Update the type feedback vector, if needed.
Handle<FeedbackMetadata> new_feedback_metadata(
new_shared_info->feedback_metadata());
@@ -898,7 +898,7 @@ void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
Isolate* isolate = function_wrapper->GetIsolate();
CHECK(script_handle->IsScript() || script_handle->IsUndefined(isolate));
SharedFunctionInfo::SetScript(shared_info, script_handle);
- shared_info->DisableOptimization(kLiveEdit);
+ shared_info->DisableOptimization(BailoutReason::kLiveEdit);
function_wrapper->GetIsolate()->compilation_cache()->Remove(shared_info);
}
@@ -1009,7 +1009,8 @@ static Handle<Script> CreateScriptCopy(Handle<Script> original) {
copy->set_column_offset(original->column_offset());
copy->set_type(original->type());
copy->set_context_data(original->context_data());
- copy->set_eval_from_shared(original->eval_from_shared());
+ copy->set_eval_from_shared_or_wrapped_arguments(
+ original->eval_from_shared_or_wrapped_arguments());
copy->set_eval_from_position(original->eval_from_position());
Handle<FixedArray> infos(isolate->factory()->NewFixedArray(
diff --git a/deps/v8/src/debug/mirrors.js b/deps/v8/src/debug/mirrors.js
index 8e9a5bf3da..15d5e64258 100644
--- a/deps/v8/src/debug/mirrors.js
+++ b/deps/v8/src/debug/mirrors.js
@@ -569,7 +569,7 @@ inherits(NumberMirror, ValueMirror);
NumberMirror.prototype.toText = function() {
- return %NumberToString(this.value_);
+ return '' + this.value_;
};
diff --git a/deps/v8/src/deoptimize-reason.h b/deps/v8/src/deoptimize-reason.h
index ddfe637293..3fabf555be 100644
--- a/deps/v8/src/deoptimize-reason.h
+++ b/deps/v8/src/deoptimize-reason.h
@@ -11,24 +11,16 @@ namespace v8 {
namespace internal {
#define DEOPTIMIZE_REASON_LIST(V) \
- V(AccessCheck, "Access check needed") \
- V(NoReason, "no reason") \
V(ArrayBufferWasNeutered, "array buffer was neutered") \
- V(ConstantGlobalVariableAssignment, "Constant global variable assignment") \
- V(ConversionOverflow, "conversion overflow") \
V(CowArrayElementsChanged, "copy-on-write array's elements changed") \
+ V(CouldNotGrowElements, "failed to grow elements store") \
+ V(DeoptimizeNow, "%_DeoptimizeNow") \
V(DivisionByZero, "division by zero") \
- V(ExpectedHeapNumber, "Expected heap number") \
- V(ExpectedSmi, "Expected smi") \
- V(ForcedDeoptToRuntime, "Forced deopt to runtime") \
V(Hole, "hole") \
V(InstanceMigrationFailed, "instance migration failed") \
V(InsufficientTypeFeedbackForCall, "Insufficient type feedback for call") \
- V(InsufficientTypeFeedbackForCallWithArguments, \
- "Insufficient type feedback for call with arguments") \
V(InsufficientTypeFeedbackForConstruct, \
"Insufficient type feedback for construct") \
- V(FastPathFailed, "Falling off the fast path") \
V(InsufficientTypeFeedbackForForIn, "Insufficient type feedback for for-in") \
V(InsufficientTypeFeedbackForBinaryOperation, \
"Insufficient type feedback for binary operation") \
@@ -40,48 +32,28 @@ namespace internal {
"Insufficient type feedback for generic keyed access") \
V(InsufficientTypeFeedbackForUnaryOperation, \
"Insufficient type feedback for unary operation") \
- V(KeyIsNegative, "key is negative") \
V(LostPrecision, "lost precision") \
V(LostPrecisionOrNaN, "lost precision or NaN") \
- V(MementoFound, "memento found") \
V(MinusZero, "minus zero") \
V(NaN, "NaN") \
- V(NegativeKeyEncountered, "Negative key encountered") \
- V(NegativeValue, "negative value") \
V(NoCache, "no cache") \
V(NotAHeapNumber, "not a heap number") \
- V(NotAHeapNumberUndefined, "not a heap number/undefined") \
V(NotAJavaScriptObject, "not a JavaScript object") \
V(NotANumberOrOddball, "not a Number or Oddball") \
V(NotASmi, "not a Smi") \
V(NotASymbol, "not a Symbol") \
V(OutOfBounds, "out of bounds") \
- V(OutsideOfRange, "Outside of range") \
V(Overflow, "overflow") \
- V(Proxy, "proxy") \
V(ReceiverNotAGlobalProxy, "receiver was not a global proxy") \
- V(ReceiverWasAGlobalObject, "receiver was a global object") \
V(Smi, "Smi") \
- V(TooManyArguments, "too many arguments") \
- V(TracingElementsTransitions, "Tracing elements transitions") \
- V(TypeMismatchBetweenFeedbackAndConstant, \
- "Type mismatch between feedback and constant") \
- V(UnexpectedCellContentsInConstantGlobalStore, \
- "Unexpected cell contents in constant global store") \
- V(UnexpectedCellContentsInGlobalStore, \
- "Unexpected cell contents in global store") \
- V(UnexpectedObject, "unexpected object") \
- V(UnknownMapInPolymorphicAccess, "Unknown map in polymorphic access") \
- V(UnknownMapInPolymorphicCall, "Unknown map in polymorphic call") \
- V(UnknownMapInPolymorphicElementAccess, \
- "Unknown map in polymorphic element access") \
- V(UnknownMap, "Unknown map") \
+ V(Unknown, "(unknown)") \
V(ValueMismatch, "value mismatch") \
+ V(WrongCallTarget, "wrong call target") \
+ V(WrongEnumIndices, "wrong enum indices") \
V(WrongInstanceType, "wrong instance type") \
V(WrongMap, "wrong map") \
V(WrongName, "wrong name") \
- V(UndefinedOrNullInForIn, "null or undefined in for-in") \
- V(UndefinedOrNullInToObject, "null or undefined in ToObject")
+ V(WrongValue, "wrong value")
enum class DeoptimizeReason : uint8_t {
#define DEOPTIMIZE_REASON(Name, message) k##Name,
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index ac6818ed0d..362bd12cb6 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -19,6 +19,8 @@
#include "src/tracing/trace-event.h"
#include "src/v8.h"
+// Has to be the last include (doesn't have include guards)
+#include "src/objects/object-macros.h"
namespace v8 {
namespace internal {
@@ -267,7 +269,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::DeoptimizeCode);
+ RuntimeCallCounterId::kDeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
if (FLAG_trace_deopt) {
@@ -288,7 +290,7 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::DeoptimizeCode);
+ RuntimeCallCounterId::kDeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
if (FLAG_trace_deopt) {
@@ -319,7 +321,7 @@ void Deoptimizer::MarkAllCodeForContext(Context* context) {
void Deoptimizer::DeoptimizeFunction(JSFunction* function, Code* code) {
Isolate* isolate = function->GetIsolate();
RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::DeoptimizeCode);
+ RuntimeCallCounterId::kDeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
if (code == nullptr) code = function->code();
@@ -381,7 +383,6 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
function_(function),
bailout_id_(bailout_id),
bailout_type_(type),
- preserve_optimized_(false),
from_(from),
fp_to_sp_delta_(fp_to_sp_delta),
deoptimizing_throw_(false),
@@ -555,6 +556,10 @@ int LookupCatchHandler(TranslatedFrame* translated_frame, int* data_out) {
return -1;
}
+bool ShouldPadArguments(int arg_count) {
+ return kPadArguments && (arg_count % 2 != 0);
+}
+
} // namespace
// We rely on this function not causing a GC. It is called from generated code
@@ -600,7 +605,8 @@ void Deoptimizer::DoComputeOutputFrames() {
input_data->OptimizationId()->value(), bailout_id_, fp_to_sp_delta_,
caller_frame_top_);
if (bailout_type_ == EAGER || bailout_type_ == SOFT) {
- compiled_code_->PrintDeoptLocation(trace_scope_->file(), from_);
+ compiled_code_->PrintDeoptLocation(
+ trace_scope_->file(), " ;;; deoptimize at ", from_);
}
}
@@ -727,7 +733,8 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
}
// The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by InterpreterFrameConstants.
+ // the part described by InterpreterFrameConstants. This will include
+ // argument padding, when needed.
unsigned fixed_frame_size = ComputeInterpretedFixedSize(shared);
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
@@ -752,12 +759,20 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// Compute the incoming parameter translation.
unsigned output_offset = output_frame_size;
+
+ if (ShouldPadArguments(parameter_count)) {
+ output_offset -= kPointerSize;
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), 0, frame_index,
+ output_offset, "padding ");
+ }
+
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
output_offset);
}
+ DCHECK_EQ(output_offset, output_frame->GetLastArgumentSlotOffset());
if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(), " -------------------------\n");
}
@@ -977,6 +992,9 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
unsigned height = translated_frame->height();
unsigned height_in_bytes = height * kPointerSize;
+ int parameter_count = height;
+ if (ShouldPadArguments(parameter_count)) height_in_bytes += kPointerSize;
+
TranslatedFrame::iterator function_iterator = value_iterator;
Object* function = value_iterator->GetRawValue();
value_iterator++;
@@ -990,7 +1008,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
- int parameter_count = height;
FrameDescription* output_frame = new (output_frame_size)
FrameDescription(output_frame_size, parameter_count);
@@ -1009,14 +1026,21 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
}
output_frame->SetTop(top_address);
- // Compute the incoming parameter translation.
unsigned output_offset = output_frame_size;
+ if (ShouldPadArguments(parameter_count)) {
+ output_offset -= kPointerSize;
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), 0, frame_index,
+ output_offset, "padding ");
+ }
+
+ // Compute the incoming parameter translation.
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
output_offset);
}
+ DCHECK_EQ(output_offset, output_frame->GetLastArgumentSlotOffset());
// Read caller's PC from the previous frame.
output_offset -= kPCOnStackSize;
intptr_t value;
@@ -1080,6 +1104,10 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
PrintF(trace_scope_->file(), "(%d)\n", height - 1);
}
+ output_offset -= kPointerSize;
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), 0, frame_index,
+ output_offset, "padding ");
+
DCHECK_EQ(0, output_offset);
Builtins* builtins = isolate_->builtins();
@@ -1125,6 +1153,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
if (PadTopOfStackRegister()) height_in_bytes += kPointerSize;
}
+ int parameter_count = height;
+ if (ShouldPadArguments(parameter_count)) height_in_bytes += kPointerSize;
+
JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
value_iterator++;
input_index++;
@@ -1140,8 +1171,8 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
- FrameDescription* output_frame =
- new (output_frame_size) FrameDescription(output_frame_size);
+ FrameDescription* output_frame = new (output_frame_size)
+ FrameDescription(output_frame_size, parameter_count);
// Construct stub can not be topmost.
DCHECK(frame_index > 0 && frame_index < output_count_);
@@ -1154,9 +1185,15 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
output_frame->SetTop(top_address);
- // Compute the incoming parameter translation.
- int parameter_count = height;
unsigned output_offset = output_frame_size;
+
+ if (ShouldPadArguments(parameter_count)) {
+ output_offset -= kPointerSize;
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), 0, frame_index,
+ output_offset, "padding ");
+ }
+
+ // Compute the incoming parameter translation.
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
// The allocated receiver of a construct stub frame is passed as the
@@ -1167,6 +1204,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
(i == 0) ? reinterpret_cast<Address>(top_address) : nullptr);
}
+ DCHECK_EQ(output_offset, output_frame->GetLastArgumentSlotOffset());
// Read caller's PC from the previous frame.
output_offset -= kPCOnStackSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
@@ -1224,10 +1262,21 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
"constructor function ");
// The deopt info contains the implicit receiver or the new target at the
- // position of the receiver. Copy it to the top of stack.
+ // position of the receiver. Copy it to the top of stack, with the hole value
+ // as padding to maintain alignment.
output_offset -= kPointerSize;
- value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), 0, frame_index,
+ output_offset, "padding");
+
+ output_offset -= kPointerSize;
+
+ if (ShouldPadArguments(parameter_count)) {
+ value = output_frame->GetFrameSlot(output_frame_size - 2 * kPointerSize);
+ } else {
+ value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
+ }
output_frame->SetFrameSlot(output_offset, value);
+
if (bailout_id == BailoutId::ConstructStubCreate()) {
DebugPrintOutputSlot(value, frame_index, output_offset, "new target\n");
} else {
@@ -1371,7 +1420,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
// parameter count.
int stack_param_count = height_in_words - register_parameter_count - 1;
if (must_handle_result) stack_param_count++;
- int output_frame_size =
+ unsigned output_frame_size =
kPointerSize * (stack_param_count + allocatable_register_count +
padding_slot_count) +
BuiltinContinuationFrameConstants::kFixedFrameSize;
@@ -1413,9 +1462,12 @@ void Deoptimizer::DoComputeBuiltinContinuation(
stack_param_count);
}
- unsigned output_frame_offset = output_frame_size;
- FrameDescription* output_frame =
- new (output_frame_size) FrameDescription(output_frame_size);
+ int translated_stack_parameters =
+ must_handle_result ? stack_param_count - 1 : stack_param_count;
+
+ if (ShouldPadArguments(stack_param_count)) output_frame_size += kPointerSize;
+ FrameDescription* output_frame = new (output_frame_size)
+ FrameDescription(output_frame_size, stack_param_count);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous frame's top and
@@ -1446,8 +1498,12 @@ void Deoptimizer::DoComputeBuiltinContinuation(
intptr_t value;
- int translated_stack_parameters =
- must_handle_result ? stack_param_count - 1 : stack_param_count;
+ unsigned output_frame_offset = output_frame_size;
+ if (ShouldPadArguments(stack_param_count)) {
+ output_frame_offset -= kPointerSize;
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), 0, frame_index,
+ output_frame_offset, "padding ");
+ }
for (int i = 0; i < translated_stack_parameters; ++i) {
output_frame_offset -= kPointerSize;
@@ -1462,6 +1518,8 @@ void Deoptimizer::DoComputeBuiltinContinuation(
"placeholder for return result on lazy deopt ");
}
+ DCHECK_EQ(output_frame_offset, output_frame->GetLastArgumentSlotOffset());
+
for (int i = 0; i < register_parameter_count; ++i) {
Object* object = value_iterator->GetRawValue();
int code = continuation_descriptor.GetRegisterParameter(i).code();
@@ -1612,13 +1670,6 @@ void Deoptimizer::DoComputeBuiltinContinuation(
output_frame->SetRegister(context_reg.code(), context_value);
}
- // TODO(6898): For eager deopts within builtin stub frames we currently skip
- // marking the underlying function as deoptimized. This is to avoid deopt
- // loops where we would just generate the same optimized code all over again.
- if (is_topmost && bailout_type_ != LAZY) {
- preserve_optimized_ = true;
- }
-
// Ensure the frame pointer register points to the callee's frame. The builtin
// will build its own frame once we continue to it.
Register fp_reg = JavaScriptFrame::fp_register();
@@ -1663,6 +1714,15 @@ void Deoptimizer::MaterializeHeapObjects() {
reinterpret_cast<intptr_t>(*value);
}
+ translated_state_.VerifyMaterializedObjects();
+
+ bool feedback_updated = translated_state_.DoUpdateFeedback();
+ if (trace_scope_ != nullptr && feedback_updated) {
+ PrintF(trace_scope_->file(), "Feedback updated");
+ compiled_code_->PrintDeoptLocation(trace_scope_->file(),
+ " from deoptimization at ", from_);
+ }
+
isolate_->materialized_object_store()->Remove(
reinterpret_cast<Address>(stack_fp_));
}
@@ -1747,14 +1807,6 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
}
// static
-unsigned Deoptimizer::ComputeJavascriptFixedSize(SharedFunctionInfo* shared) {
- // The fixed part of the frame consists of the return address, frame
- // pointer, function, context, and all the incoming arguments.
- return ComputeIncomingArgumentSize(shared) +
- StandardFrameConstants::kFixedFrameSize;
-}
-
-// static
unsigned Deoptimizer::ComputeInterpretedFixedSize(SharedFunctionInfo* shared) {
// The fixed part of the frame consists of the return address, frame
// pointer, function, context, bytecode offset and all the incoming arguments.
@@ -1764,7 +1816,9 @@ unsigned Deoptimizer::ComputeInterpretedFixedSize(SharedFunctionInfo* shared) {
// static
unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo* shared) {
- return (shared->internal_formal_parameter_count() + 1) * kPointerSize;
+ int parameter_slots = shared->internal_formal_parameter_count() + 1;
+ if (kPadArguments) parameter_slots = RoundUp(parameter_slots, 2);
+ return parameter_slots * kPointerSize;
}
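
The change above rounds the argument slot count up to an even number whenever kPadArguments is set (targets such as arm64 that keep the stack pointer 16-byte aligned). A minimal standalone sketch of that arithmetic, with illustrative constants rather than V8's real ones:

// Standalone sketch of the argument-slot padding arithmetic; the constants
// are assumptions chosen for illustration, not V8's actual values.
#include <cstdio>

constexpr int kPointerSize = 8;       // 64-bit target (assumption).
constexpr bool kPadArguments = true;  // e.g. arm64, which needs sp % 16 == 0.

int RoundUp(int value, int multiple) {
  return ((value + multiple - 1) / multiple) * multiple;
}

int IncomingArgumentSize(int formal_parameter_count) {
  int parameter_slots = formal_parameter_count + 1;  // +1 for the receiver.
  if (kPadArguments) parameter_slots = RoundUp(parameter_slots, 2);
  return parameter_slots * kPointerSize;
}

int main() {
  // 2 formals + receiver = 3 slots -> padded to 4 slots -> 32 bytes.
  std::printf("%d\n", IncomingArgumentSize(2));  // 32
  // 3 formals + receiver = 4 slots -> already even -> 32 bytes.
  std::printf("%d\n", IncomingArgumentSize(3));  // 32
}
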
void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
@@ -2001,6 +2055,11 @@ void Translation::StoreLiteral(int literal_id) {
buffer_->Add(literal_id);
}
+void Translation::AddUpdateFeedback(int vector_literal, int slot) {
+ buffer_->Add(UPDATE_FEEDBACK);
+ buffer_->Add(vector_literal);
+ buffer_->Add(slot);
+}
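
The new UPDATE_FEEDBACK record is encoded in the same flat translation stream as everything else: BEGIN now carries a third operand (the update-feedback count), and the optional record itself carries a feedback-vector literal index plus a slot. A rough standalone sketch of the writer/reader pairing, with simplified opcode and buffer types (not the real TranslationBuffer):

#include <cassert>
#include <vector>

enum Opcode { BEGIN, UPDATE_FEEDBACK /*, ...remaining opcodes... */ };

int main() {
  std::vector<int> buffer;

  // Writer side (cf. the Translation constructor and AddUpdateFeedback).
  int frame_count = 2, jsframe_count = 1, update_feedback_count = 1;
  buffer.push_back(BEGIN);
  buffer.push_back(frame_count);
  buffer.push_back(jsframe_count);
  buffer.push_back(update_feedback_count);
  buffer.push_back(UPDATE_FEEDBACK);
  buffer.push_back(/*vector_literal=*/3);  // index into the literal array
  buffer.push_back(/*slot=*/7);            // feedback slot to reset on deopt

  // Reader side (cf. TranslatedState::Init and ReadUpdateFeedback).
  size_t pos = 0;
  int opcode = buffer[pos++];
  assert(opcode == BEGIN);
  pos += 2;  // skip the two frame counts
  int feedback_records = buffer[pos++];
  if (feedback_records == 1) {
    opcode = buffer[pos++];
    assert(opcode == UPDATE_FEEDBACK);
    int vector_literal = buffer[pos++];
    int slot = buffer[pos++];
    (void)vector_literal;
    (void)slot;
  }
  (void)opcode;
}
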
void Translation::StoreJSFrameFunction() {
StoreStackSlot((StandardFrameConstants::kCallerPCOffset -
@@ -2028,9 +2087,10 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case DOUBLE_STACK_SLOT:
case LITERAL:
return 1;
- case BEGIN:
case ARGUMENTS_ADAPTOR_FRAME:
+ case UPDATE_FEEDBACK:
return 2;
+ case BEGIN:
case INTERPRETED_FRAME:
case CONSTRUCT_STUB_FRAME:
case BUILTIN_CONTINUATION_FRAME:
@@ -2229,7 +2289,7 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
CHECK(code->instruction_start() <= pc && pc <= code->instruction_end());
SourcePosition last_position = SourcePosition::Unknown();
- DeoptimizeReason last_reason = DeoptimizeReason::kNoReason;
+ DeoptimizeReason last_reason = DeoptimizeReason::kUnknown;
int last_deopt_id = kNoDeoptimizationId;
int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) |
RelocInfo::ModeMask(RelocInfo::DEOPT_ID) |
@@ -2385,9 +2445,8 @@ int TranslatedValue::object_index() const {
Object* TranslatedValue::GetRawValue() const {
// If we have a value, return it.
- Handle<Object> result_handle;
- if (value_.ToHandle(&result_handle)) {
- return *result_handle;
+ if (materialization_state() == kFinished) {
+ return *storage_;
}
// Otherwise, do a best effort to get the value without allocation.
@@ -2429,11 +2488,15 @@ Object* TranslatedValue::GetRawValue() const {
return isolate()->heap()->arguments_marker();
}
+void TranslatedValue::set_initialized_storage(Handle<Object> storage) {
+ DCHECK_EQ(kUninitialized, materialization_state());
+ storage_ = storage;
+ materialization_state_ = kFinished;
+}
Handle<Object> TranslatedValue::GetValue() {
- Handle<Object> result;
// If we already have a value, then get it.
- if (value_.ToHandle(&result)) return result;
+ if (materialization_state() == kFinished) return storage_;
// Otherwise we have to materialize.
switch (kind()) {
@@ -2444,12 +2507,27 @@ Handle<Object> TranslatedValue::GetValue() {
case TranslatedValue::kFloat:
case TranslatedValue::kDouble: {
MaterializeSimple();
- return value_.ToHandleChecked();
+ return storage_;
}
case TranslatedValue::kCapturedObject:
- case TranslatedValue::kDuplicatedObject:
- return container_->MaterializeObjectAt(object_index());
+ case TranslatedValue::kDuplicatedObject: {
+ // We need to materialize the object (or possibly even object graphs).
+ // To make the object verifier happy, we materialize in two steps.
+
+ // 1. Allocate storage for reachable objects. This makes sure that for
+ // each object we have allocated space on heap. The space will be
+ // a byte array that will be later initialized, or a fully
+ // initialized object if it is safe to allocate one that will
+ // pass the verifier.
+ container_->EnsureObjectAllocatedAt(this);
+
+ // 2. Initialize the objects. If we have allocated only byte arrays
+ // for some objects, we now overwrite the byte arrays with the
+ // correct object fields. Note that this phase does not allocate
+ // any new objects, so it does not trigger the object verifier.
+ return container_->InitializeObjectAt(this);
+ }
case TranslatedValue::kInvalid:
FATAL("unexpected case");
@@ -2460,36 +2538,39 @@ Handle<Object> TranslatedValue::GetValue() {
return Handle<Object>::null();
}
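
The comment above describes the two-phase materialization that replaces the old single-pass materializer: first allocate storage for every reachable object, then initialize fields without allocating, so the verifier never sees a half-built object. A standalone sketch of the same worklist pattern on a plain object graph (illustrative types only, not V8 heap objects):

#include <cassert>
#include <memory>
#include <stack>
#include <vector>

struct Desc {                 // a "captured object" description
  std::vector<int> children;  // indices of child descriptions
};

struct Node {
  std::vector<Node*> fields;
};

int main() {
  // A small cyclic graph: 0 -> {1, 0}, 1 -> {0}.
  std::vector<Desc> descs = {{{1, 0}}, {{0}}};
  std::vector<std::unique_ptr<Node>> storage(descs.size());

  // Phase 1: allocate storage for everything reachable from object 0.
  std::stack<int> worklist;
  worklist.push(0);
  while (!worklist.empty()) {
    int i = worklist.top();
    worklist.pop();
    if (storage[i]) continue;  // already allocated
    storage[i] = std::make_unique<Node>();
    for (int child : descs[i].children) worklist.push(child);
  }

  // Phase 2: initialize fields; every referenced object already exists,
  // so no allocation happens while the graph is being wired up.
  for (size_t i = 0; i < descs.size(); ++i) {
    for (int child : descs[i].children)
      storage[i]->fields.push_back(storage[child].get());
  }

  assert(storage[0]->fields[1] == storage[0].get());  // cycle wired correctly
}
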
-
void TranslatedValue::MaterializeSimple() {
// If we already have materialized, return.
- if (!value_.is_null()) return;
+ if (materialization_state() == kFinished) return;
Object* raw_value = GetRawValue();
if (raw_value != isolate()->heap()->arguments_marker()) {
// We can get the value without allocation, just return it here.
- value_ = Handle<Object>(raw_value, isolate());
+ set_initialized_storage(Handle<Object>(raw_value, isolate()));
return;
}
switch (kind()) {
case kInt32:
- value_ = Handle<Object>(isolate()->factory()->NewNumber(int32_value()));
+ set_initialized_storage(
+ Handle<Object>(isolate()->factory()->NewNumber(int32_value())));
return;
case kUInt32:
- value_ = Handle<Object>(isolate()->factory()->NewNumber(uint32_value()));
+ set_initialized_storage(
+ Handle<Object>(isolate()->factory()->NewNumber(uint32_value())));
return;
case kFloat: {
double scalar_value = float_value().get_scalar();
- value_ = Handle<Object>(isolate()->factory()->NewNumber(scalar_value));
+ set_initialized_storage(
+ Handle<Object>(isolate()->factory()->NewNumber(scalar_value)));
return;
}
case kDouble: {
double scalar_value = double_value().get_scalar();
- value_ = Handle<Object>(isolate()->factory()->NewNumber(scalar_value));
+ set_initialized_storage(
+ Handle<Object>(isolate()->factory()->NewNumber(scalar_value)));
return;
}
@@ -2551,7 +2632,7 @@ Float64 TranslatedState::GetDoubleSlot(Address fp, int slot_offset) {
void TranslatedValue::Handlify() {
if (kind() == kTagged) {
- value_ = Handle<Object>(raw_literal(), isolate());
+ set_initialized_storage(Handle<Object>(raw_literal(), isolate()));
raw_literal_ = nullptr;
}
}
@@ -2712,7 +2793,7 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
return TranslatedFrame::JavaScriptBuiltinContinuationFrame(
bailout_id, shared_info, height_with_context);
}
-
+ case Translation::UPDATE_FEEDBACK:
case Translation::BEGIN:
case Translation::DUPLICATED_OBJECT:
case Translation::ARGUMENTS_ELEMENTS:
@@ -2802,6 +2883,7 @@ void TranslatedState::CreateArgumentsElementsTranslatedValues(
PrintF(trace_file, "arguments elements object #%d (type = %d, length = %d)",
object_index, static_cast<uint8_t>(type), length);
}
+
object_positions_.push_back({frame_index, value_index});
frame.Add(TranslatedValue::NewDeferredObject(
this, length + FixedArray::kHeaderSize / kPointerSize, object_index));
@@ -2855,6 +2937,7 @@ int TranslatedState::CreateNextTranslatedValue(
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME:
case Translation::BUILTIN_CONTINUATION_FRAME:
+ case Translation::UPDATE_FEEDBACK:
// Peeled off before getting here.
break;
@@ -3117,8 +3200,7 @@ int TranslatedState::CreateNextTranslatedValue(
FATAL("We should never get here - unexpected deopt info.");
}
-TranslatedState::TranslatedState(const JavaScriptFrame* frame)
- : isolate_(nullptr), stack_frame_pointer_(nullptr) {
+TranslatedState::TranslatedState(const JavaScriptFrame* frame) {
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationData* data =
static_cast<const OptimizedFrame*>(frame)->GetDeoptimizationData(
@@ -3131,9 +3213,6 @@ TranslatedState::TranslatedState(const JavaScriptFrame* frame)
frame->function()->shared()->internal_formal_parameter_count());
}
-TranslatedState::TranslatedState()
- : isolate_(nullptr), stack_frame_pointer_(nullptr) {}
-
void TranslatedState::Init(Address input_frame_pointer,
TranslationIterator* iterator,
FixedArray* literal_array, RegisterValues* registers,
@@ -3149,9 +3228,15 @@ void TranslatedState::Init(Address input_frame_pointer,
CHECK(opcode == Translation::BEGIN);
int count = iterator->Next();
+ frames_.reserve(count);
iterator->Next(); // Drop JS frames count.
+ int update_feedback_count = iterator->Next();
+ CHECK_GE(update_feedback_count, 0);
+ CHECK_LE(update_feedback_count, 1);
- frames_.reserve(count);
+ if (update_feedback_count == 1) {
+ ReadUpdateFeedback(iterator, literal_array, trace_file);
+ }
std::stack<int> nested_counts;
@@ -3209,563 +3294,472 @@ void TranslatedState::Init(Address input_frame_pointer,
void TranslatedState::Prepare(Address stack_frame_pointer) {
for (auto& frame : frames_) frame.Handlify();
+ if (feedback_vector_ != nullptr) {
+ feedback_vector_handle_ =
+ Handle<FeedbackVector>(feedback_vector_, isolate());
+ feedback_vector_ = nullptr;
+ }
stack_frame_pointer_ = stack_frame_pointer;
UpdateFromPreviouslyMaterializedObjects();
}
-class TranslatedState::CapturedObjectMaterializer {
- public:
- CapturedObjectMaterializer(TranslatedState* state, int frame_index,
- int field_count)
- : state_(state), frame_index_(frame_index), field_count_(field_count) {}
-
- // Ensure the properties never contain mutable heap numbers. This is necessary
- // because the deoptimizer generalizes all maps to tagged representation
- // fields (so mutable heap numbers are not allowed).
- static void EnsurePropertiesGeneralized(Handle<Object> properties_or_hash) {
- if (properties_or_hash->IsPropertyArray()) {
- Handle<PropertyArray> properties =
- Handle<PropertyArray>::cast(properties_or_hash);
- int length = properties->length();
- for (int i = 0; i < length; i++) {
- if (properties->get(i)->IsMutableHeapNumber()) {
- Handle<HeapObject> box(HeapObject::cast(properties->get(i)));
- box->set_map(properties->GetIsolate()->heap()->heap_number_map());
- }
+TranslatedValue* TranslatedState::GetValueByObjectIndex(int object_index) {
+ CHECK_LT(static_cast<size_t>(object_index), object_positions_.size());
+ TranslatedState::ObjectPosition pos = object_positions_[object_index];
+ return &(frames_[pos.frame_index_].values_[pos.value_index_]);
+}
+
+Handle<Object> TranslatedState::InitializeObjectAt(TranslatedValue* slot) {
+ slot = ResolveCapturedObject(slot);
+
+ DisallowHeapAllocation no_allocation;
+ if (slot->materialization_state() != TranslatedValue::kFinished) {
+ std::stack<int> worklist;
+ worklist.push(slot->object_index());
+ slot->mark_finished();
+
+ while (!worklist.empty()) {
+ int index = worklist.top();
+ worklist.pop();
+ InitializeCapturedObjectAt(index, &worklist, no_allocation);
+ }
+ }
+ return slot->GetStorage();
+}
+
+void TranslatedState::InitializeCapturedObjectAt(
+ int object_index, std::stack<int>* worklist,
+ const DisallowHeapAllocation& no_allocation) {
+ CHECK_LT(static_cast<size_t>(object_index), object_positions_.size());
+ TranslatedState::ObjectPosition pos = object_positions_[object_index];
+ int value_index = pos.value_index_;
+
+ TranslatedFrame* frame = &(frames_[pos.frame_index_]);
+ TranslatedValue* slot = &(frame->values_[value_index]);
+ value_index++;
+
+ CHECK_EQ(TranslatedValue::kFinished, slot->materialization_state());
+ CHECK_EQ(TranslatedValue::kCapturedObject, slot->kind());
+
+ // Ensure all fields are initialized.
+ int children_init_index = value_index;
+ for (int i = 0; i < slot->GetChildrenCount(); i++) {
+ // If the field is an object that has not been initialized yet, queue it
+ // for initialization (and mark it as such).
+ TranslatedValue* child_slot = frame->ValueAt(children_init_index);
+ if (child_slot->kind() == TranslatedValue::kCapturedObject ||
+ child_slot->kind() == TranslatedValue::kDuplicatedObject) {
+ child_slot = ResolveCapturedObject(child_slot);
+ if (child_slot->materialization_state() != TranslatedValue::kFinished) {
+ DCHECK_EQ(TranslatedValue::kAllocated,
+ child_slot->materialization_state());
+ worklist->push(child_slot->object_index());
+ child_slot->mark_finished();
}
}
+ SkipSlots(1, frame, &children_init_index);
}
- Handle<Object> FieldAt(int* value_index) {
- CHECK_GT(field_count_, 0);
- --field_count_;
- Handle<Object> object = state_->MaterializeAt(frame_index_, value_index);
- // This is a big hammer to make sure that the materialized objects do not
- // have property arrays with mutable heap numbers (mutable heap numbers are
- // bad because we generalize maps for all materialized objects).
- EnsurePropertiesGeneralized(object);
- return object;
+ // Read the map.
+ // The map should never be materialized, so let us check we already have
+ // an existing object here.
+ CHECK_EQ(frame->values_[value_index].kind(), TranslatedValue::kTagged);
+ Handle<Map> map = Handle<Map>::cast(frame->values_[value_index].GetValue());
+ CHECK(map->IsMap());
+ value_index++;
+
+ // Handle the special cases.
+ switch (map->instance_type()) {
+ case MUTABLE_HEAP_NUMBER_TYPE:
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ return;
+
+ case FIXED_ARRAY_TYPE:
+ case HASH_TABLE_TYPE:
+ case PROPERTY_ARRAY_TYPE:
+ case CONTEXT_EXTENSION_TYPE:
+ InitializeObjectWithTaggedFieldsAt(frame, &value_index, slot, map,
+ no_allocation);
+ break;
+
+ default:
+ CHECK(map->IsJSObjectMap());
+ InitializeJSObjectAt(frame, &value_index, slot, map, no_allocation);
+ break;
}
+ CHECK_EQ(value_index, children_init_index);
+}
- ~CapturedObjectMaterializer() { CHECK_EQ(0, field_count_); }
+void TranslatedState::EnsureObjectAllocatedAt(TranslatedValue* slot) {
+ slot = ResolveCapturedObject(slot);
- private:
- TranslatedState* state_;
- int frame_index_;
- int field_count_;
+ if (slot->materialization_state() == TranslatedValue::kUninitialized) {
+ std::stack<int> worklist;
+ worklist.push(slot->object_index());
+ slot->mark_allocated();
+
+ while (!worklist.empty()) {
+ int index = worklist.top();
+ worklist.pop();
+ EnsureCapturedObjectAllocatedAt(index, &worklist);
+ }
+ }
+}
+
+void TranslatedState::MaterializeFixedDoubleArray(TranslatedFrame* frame,
+ int* value_index,
+ TranslatedValue* slot,
+ Handle<Map> map) {
+ int length = Smi::cast(frame->values_[*value_index].GetRawValue())->value();
+ (*value_index)++;
+ Handle<FixedDoubleArray> array = Handle<FixedDoubleArray>::cast(
+ isolate()->factory()->NewFixedDoubleArray(length));
+ CHECK_GT(length, 0);
+ for (int i = 0; i < length; i++) {
+ CHECK_NE(TranslatedValue::kCapturedObject,
+ frame->values_[*value_index].kind());
+ Handle<Object> value = frame->values_[*value_index].GetValue();
+ if (value->IsNumber()) {
+ array->set(i, value->Number());
+ } else {
+ CHECK(value.is_identical_to(isolate()->factory()->the_hole_value()));
+ array->set_the_hole(isolate(), i);
+ }
+ (*value_index)++;
+ }
+ slot->set_storage(array);
+}
+
+void TranslatedState::MaterializeMutableHeapNumber(TranslatedFrame* frame,
+ int* value_index,
+ TranslatedValue* slot) {
+ CHECK_NE(TranslatedValue::kCapturedObject,
+ frame->values_[*value_index].kind());
+ Handle<Object> value = frame->values_[*value_index].GetValue();
+ Handle<HeapNumber> box;
+ CHECK(value->IsNumber());
+ box = isolate()->factory()->NewHeapNumber(value->Number(), MUTABLE);
+ (*value_index)++;
+ slot->set_storage(box);
+}
+
+namespace {
+
+enum DoubleStorageKind : uint8_t {
+ kStoreTagged,
+ kStoreUnboxedDouble,
+ kStoreMutableHeapNumber,
};
-Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
- TranslatedValue* slot, int frame_index, int* value_index) {
- int length = slot->GetChildrenCount();
+} // namespace
- CapturedObjectMaterializer materializer(this, frame_index, length);
+void TranslatedState::SkipSlots(int slots_to_skip, TranslatedFrame* frame,
+ int* value_index) {
+ while (slots_to_skip > 0) {
+ TranslatedValue* slot = &(frame->values_[*value_index]);
+ (*value_index)++;
+ slots_to_skip--;
- Handle<Object> result;
- if (slot->value_.ToHandle(&result)) {
- // This has been previously materialized, return the previous value.
- // We still need to skip all the nested objects.
- for (int i = 0; i < length; i++) {
- materializer.FieldAt(value_index);
+ if (slot->kind() == TranslatedValue::kCapturedObject) {
+ slots_to_skip += slot->GetChildrenCount();
}
-
- return result;
}
+}
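
SkipSlots relies on translated values being stored in pre-order, with an object's children laid out immediately after it; skipping one logical slot therefore has to add the children back onto the count. A small standalone sketch of that traversal (the child-count array stands in for TranslatedValue):

#include <cassert>
#include <vector>

// values[i] = number of children of slot i (0 for a plain value).
void SkipSlots(int slots_to_skip, const std::vector<int>& values, int* index) {
  while (slots_to_skip > 0) {
    int children = values[*index];
    (*index)++;
    slots_to_skip--;
    slots_to_skip += children;  // the children still have to be skipped
  }
}

int main() {
  // Layout: [object with 2 children, child0, child1, next_value]
  std::vector<int> values = {2, 0, 0, 0};
  int index = 0;
  SkipSlots(1, values, &index);  // skip the object *and* its two children
  assert(index == 3);            // we land on next_value
}
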
- Handle<Object> map_object = materializer.FieldAt(value_index);
- Handle<Map> map = Map::GeneralizeAllFields(Handle<Map>::cast(map_object));
+void TranslatedState::EnsureCapturedObjectAllocatedAt(
+ int object_index, std::stack<int>* worklist) {
+ CHECK_LT(static_cast<size_t>(object_index), object_positions_.size());
+ TranslatedState::ObjectPosition pos = object_positions_[object_index];
+ int value_index = pos.value_index_;
+
+ TranslatedFrame* frame = &(frames_[pos.frame_index_]);
+ TranslatedValue* slot = &(frame->values_[value_index]);
+ value_index++;
+
+ CHECK_EQ(TranslatedValue::kAllocated, slot->materialization_state());
+ CHECK_EQ(TranslatedValue::kCapturedObject, slot->kind());
+
+ // Read the map.
+ // The map should never be materialized, so let us check we already have
+ // an existing object here.
+ CHECK_EQ(frame->values_[value_index].kind(), TranslatedValue::kTagged);
+ Handle<Map> map = Handle<Map>::cast(frame->values_[value_index].GetValue());
+ CHECK(map->IsMap());
+ value_index++;
+
+ // Handle the special cases.
switch (map->instance_type()) {
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ // Materialize (i.e. allocate&initialize) the array and return since
+ // there is no need to process the children.
+ return MaterializeFixedDoubleArray(frame, &value_index, slot, map);
+
case MUTABLE_HEAP_NUMBER_TYPE:
- case HEAP_NUMBER_TYPE: {
- // Reuse the HeapNumber value directly as it is already properly
- // tagged and skip materializing the HeapNumber explicitly.
- Handle<Object> object = materializer.FieldAt(value_index);
- slot->value_ = object;
- // On 32-bit architectures, there is an extra slot there because
- // the escape analysis calculates the number of slots as
- // object-size/pointer-size. To account for this, we read out
- // any extra slots.
- for (int i = 0; i < length - 2; i++) {
- materializer.FieldAt(value_index);
- }
- return object;
- }
- case JS_OBJECT_TYPE:
- case JS_ERROR_TYPE:
- case JS_ARGUMENTS_TYPE: {
- Handle<JSObject> object =
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED);
- slot->value_ = object;
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- int in_object_properties = map->GetInObjectProperties();
- for (int i = 0; i < in_object_properties; ++i) {
- Handle<Object> value = materializer.FieldAt(value_index);
- FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
- object->FastPropertyAtPut(index, *value);
- }
- return object;
- }
- case JS_SET_KEY_VALUE_ITERATOR_TYPE:
- case JS_SET_VALUE_ITERATOR_TYPE: {
- Handle<JSSetIterator> object = Handle<JSSetIterator>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
- slot->value_ = object;
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> table = materializer.FieldAt(value_index);
- Handle<Object> index = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_table(*table);
- object->set_index(*index);
- return object;
- }
- case JS_MAP_KEY_ITERATOR_TYPE:
- case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
- case JS_MAP_VALUE_ITERATOR_TYPE: {
- Handle<JSMapIterator> object = Handle<JSMapIterator>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
- slot->value_ = object;
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> table = materializer.FieldAt(value_index);
- Handle<Object> index = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_table(*table);
- object->set_index(*index);
- return object;
- }
-#define ARRAY_ITERATOR_CASE(type) case type:
- ARRAY_ITERATOR_TYPE_LIST(ARRAY_ITERATOR_CASE)
-#undef ARRAY_ITERATOR_CASE
- {
- Handle<JSArrayIterator> object = Handle<JSArrayIterator>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
- slot->value_ = object;
- // Initialize the index to zero to make the heap verifier happy.
- object->set_index(Smi::FromInt(0));
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> iterated_object = materializer.FieldAt(value_index);
- Handle<Object> next_index = materializer.FieldAt(value_index);
- Handle<Object> iterated_object_map = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_object(*iterated_object);
- object->set_index(*next_index);
- object->set_object_map(*iterated_object_map);
- return object;
- }
- case JS_STRING_ITERATOR_TYPE: {
- Handle<JSStringIterator> object = Handle<JSStringIterator>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
- slot->value_ = object;
- // Initialize the index to zero to make the heap verifier happy.
- object->set_index(0);
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> iterated_string = materializer.FieldAt(value_index);
- Handle<Object> next_index = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- CHECK(iterated_string->IsString());
- object->set_string(String::cast(*iterated_string));
- CHECK(next_index->IsSmi());
- object->set_index(Smi::ToInt(*next_index));
- return object;
- }
- case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE: {
- Handle<JSAsyncFromSyncIterator> object =
- Handle<JSAsyncFromSyncIterator>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
- slot->value_ = object;
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> sync_iterator = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_sync_iterator(JSReceiver::cast(*sync_iterator));
- return object;
- }
- case JS_ARRAY_TYPE: {
- Handle<JSArray> object = Handle<JSArray>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
- slot->value_ = object;
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> array_length = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_length(*array_length);
- int in_object_properties = map->GetInObjectProperties();
- for (int i = 0; i < in_object_properties; ++i) {
- Handle<Object> value = materializer.FieldAt(value_index);
- FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
- object->FastPropertyAtPut(index, *value);
- }
- return object;
- }
- case JS_BOUND_FUNCTION_TYPE: {
- Handle<JSBoundFunction> object = Handle<JSBoundFunction>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
- slot->value_ = object;
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> bound_target_function = materializer.FieldAt(value_index);
- Handle<Object> bound_this = materializer.FieldAt(value_index);
- Handle<Object> bound_arguments = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_bound_target_function(
- JSReceiver::cast(*bound_target_function));
- object->set_bound_this(*bound_this);
- object->set_bound_arguments(FixedArray::cast(*bound_arguments));
- return object;
- }
- case JS_FUNCTION_TYPE: {
- Handle<JSFunction> object = isolate_->factory()->NewFunction(
- map, handle(isolate_->object_function()->shared()),
- handle(isolate_->context()), NOT_TENURED);
- slot->value_ = object;
- // We temporarily allocated a JSFunction for the {Object} function
- // within the current context, to break cycles in the object graph.
- // The correct function and context will be set below once available.
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> shared = materializer.FieldAt(value_index);
- Handle<Object> context = materializer.FieldAt(value_index);
- Handle<Object> vector_cell = materializer.FieldAt(value_index);
- Handle<Object> code = materializer.FieldAt(value_index);
- bool has_prototype_slot = map->has_prototype_slot();
- Handle<Object> prototype;
- if (has_prototype_slot) {
- prototype = materializer.FieldAt(value_index);
- }
- object->set_map(*map);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_shared(SharedFunctionInfo::cast(*shared));
- object->set_context(Context::cast(*context));
- object->set_feedback_vector_cell(Cell::cast(*vector_cell));
- object->set_code(Code::cast(*code));
- if (has_prototype_slot) {
- object->set_prototype_or_initial_map(*prototype);
- }
- int in_object_properties = map->GetInObjectProperties();
- for (int i = 0; i < in_object_properties; ++i) {
- Handle<Object> value = materializer.FieldAt(value_index);
- FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
- object->FastPropertyAtPut(index, *value);
- }
- return object;
- }
- case JS_ASYNC_GENERATOR_OBJECT_TYPE:
- case JS_GENERATOR_OBJECT_TYPE: {
- Handle<JSGeneratorObject> object = Handle<JSGeneratorObject>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
- slot->value_ = object;
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> function = materializer.FieldAt(value_index);
- Handle<Object> context = materializer.FieldAt(value_index);
- Handle<Object> receiver = materializer.FieldAt(value_index);
- Handle<Object> input_or_debug_pos = materializer.FieldAt(value_index);
- Handle<Object> resume_mode = materializer.FieldAt(value_index);
- Handle<Object> continuation_offset = materializer.FieldAt(value_index);
- Handle<Object> register_file = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_function(JSFunction::cast(*function));
- object->set_context(Context::cast(*context));
- object->set_receiver(*receiver);
- object->set_input_or_debug_pos(*input_or_debug_pos);
- object->set_resume_mode(Smi::ToInt(*resume_mode));
- object->set_continuation(Smi::ToInt(*continuation_offset));
- object->set_register_file(FixedArray::cast(*register_file));
-
- if (object->IsJSAsyncGeneratorObject()) {
- auto generator = Handle<JSAsyncGeneratorObject>::cast(object);
- Handle<Object> queue = materializer.FieldAt(value_index);
- Handle<Object> awaited_promise = materializer.FieldAt(value_index);
- generator->set_queue(HeapObject::cast(*queue));
- generator->set_awaited_promise(HeapObject::cast(*awaited_promise));
+ // Materialize (i.e. allocate&initialize) the heap number and return.
+ // There is no need to process the children.
+ return MaterializeMutableHeapNumber(frame, &value_index, slot);
+
+ case FIXED_ARRAY_TYPE:
+ case HASH_TABLE_TYPE: {
+ // Check we have the right size.
+ int array_length =
+ Smi::cast(frame->values_[value_index].GetRawValue())->value();
+
+ int instance_size = FixedArray::SizeFor(array_length);
+ CHECK_EQ(instance_size, slot->GetChildrenCount() * kPointerSize);
+
+ // Canonicalize empty fixed array.
+ if (*map == isolate()->heap()->empty_fixed_array()->map() &&
+ array_length == 0) {
+ slot->set_storage(isolate()->factory()->empty_fixed_array());
+ } else {
+ slot->set_storage(AllocateStorageFor(slot));
}
- int in_object_properties = map->GetInObjectProperties();
- for (int i = 0; i < in_object_properties; ++i) {
- Handle<Object> value = materializer.FieldAt(value_index);
- FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
- object->FastPropertyAtPut(index, *value);
- }
- return object;
- }
- case CONS_STRING_TYPE: {
- Handle<ConsString> object = Handle<ConsString>::cast(
- isolate_->factory()
- ->NewConsString(isolate_->factory()->undefined_string(),
- isolate_->factory()->undefined_string())
- .ToHandleChecked());
- slot->value_ = object;
- Handle<Object> hash = materializer.FieldAt(value_index);
- Handle<Object> string_length = materializer.FieldAt(value_index);
- Handle<Object> first = materializer.FieldAt(value_index);
- Handle<Object> second = materializer.FieldAt(value_index);
- object->set_map(*map);
- object->set_length(Smi::ToInt(*string_length));
- object->set_first(String::cast(*first));
- object->set_second(String::cast(*second));
- CHECK(hash->IsNumber()); // The {Name::kEmptyHashField} value.
- return object;
+ // Make sure all the remaining children (after the map) are allocated.
+ return EnsureChildrenAllocated(slot->GetChildrenCount() - 1, frame,
+ &value_index, worklist);
}
- case CONTEXT_EXTENSION_TYPE: {
- Handle<ContextExtension> object =
- isolate_->factory()->NewContextExtension(
- isolate_->factory()->NewScopeInfo(1),
- isolate_->factory()->undefined_value());
- slot->value_ = object;
- Handle<Object> scope_info = materializer.FieldAt(value_index);
- Handle<Object> extension = materializer.FieldAt(value_index);
- object->set_scope_info(ScopeInfo::cast(*scope_info));
- object->set_extension(*extension);
- return object;
+
+ case PROPERTY_ARRAY_TYPE: {
+ // Check we have the right size.
+ int length_or_hash =
+ Smi::cast(frame->values_[value_index].GetRawValue())->value();
+ int array_length = PropertyArray::LengthField::decode(length_or_hash);
+ int instance_size = PropertyArray::SizeFor(array_length);
+ CHECK_EQ(instance_size, slot->GetChildrenCount() * kPointerSize);
+
+ slot->set_storage(AllocateStorageFor(slot));
+ // Make sure all the remaining children (after the map) are allocated.
+ return EnsureChildrenAllocated(slot->GetChildrenCount() - 1, frame,
+ &value_index, worklist);
}
- case HASH_TABLE_TYPE:
- case FIXED_ARRAY_TYPE: {
- Handle<Object> lengthObject = materializer.FieldAt(value_index);
- int32_t array_length = 0;
- CHECK(lengthObject->ToInt32(&array_length));
- Handle<FixedArray> object =
- isolate_->factory()->NewFixedArray(array_length);
- // We need to set the map, because the fixed array we are
- // materializing could be a context or an arguments object,
- // in which case we must retain that information.
- object->set_map(*map);
- slot->value_ = object;
- for (int i = 0; i < array_length; ++i) {
- Handle<Object> value = materializer.FieldAt(value_index);
- object->set(i, *value);
- }
- return object;
+
+ case CONTEXT_EXTENSION_TYPE: {
+ CHECK_EQ(map->instance_size(), slot->GetChildrenCount() * kPointerSize);
+ slot->set_storage(AllocateStorageFor(slot));
+ // Make sure all the remaining children (after the map) are allocated.
+ return EnsureChildrenAllocated(slot->GetChildrenCount() - 1, frame,
+ &value_index, worklist);
}
- case PROPERTY_ARRAY_TYPE: {
- DCHECK_EQ(*map, isolate_->heap()->property_array_map());
- Handle<Object> lengthObject = materializer.FieldAt(value_index);
- int32_t array_length = 0;
- CHECK(lengthObject->ToInt32(&array_length));
- Handle<PropertyArray> object =
- isolate_->factory()->NewPropertyArray(array_length);
- slot->value_ = object;
- for (int i = 0; i < array_length; ++i) {
- Handle<Object> value = materializer.FieldAt(value_index);
- object->set(i, *value);
- }
- return object;
- }
- case FIXED_DOUBLE_ARRAY_TYPE: {
- DCHECK_EQ(*map, isolate_->heap()->fixed_double_array_map());
- Handle<Object> lengthObject = materializer.FieldAt(value_index);
- int32_t array_length = 0;
- CHECK(lengthObject->ToInt32(&array_length));
- Handle<FixedArrayBase> object =
- isolate_->factory()->NewFixedDoubleArray(array_length);
- slot->value_ = object;
- if (array_length > 0) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(object);
- for (int i = 0; i < array_length; ++i) {
- Handle<Object> value = materializer.FieldAt(value_index);
- if (value.is_identical_to(isolate_->factory()->the_hole_value())) {
- double_array->set_the_hole(isolate_, i);
- } else {
- CHECK(value->IsNumber());
- double_array->set(i, value->Number());
- }
- }
+
+ default:
+ CHECK(map->IsJSObjectMap());
+ EnsureJSObjectAllocated(slot, map);
+ TranslatedValue* properties_slot = &(frame->values_[value_index]);
+ value_index++;
+ if (properties_slot->kind() == TranslatedValue::kCapturedObject) {
+ // If we are materializing the property array, make sure we put
+ // the mutable heap numbers at the right places.
+ EnsurePropertiesAllocatedAndMarked(properties_slot, map);
+ EnsureChildrenAllocated(properties_slot->GetChildrenCount(), frame,
+ &value_index, worklist);
}
- return object;
- }
- case JS_REGEXP_TYPE: {
- Handle<JSRegExp> object = Handle<JSRegExp>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
- slot->value_ = object;
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> data = materializer.FieldAt(value_index);
- Handle<Object> source = materializer.FieldAt(value_index);
- Handle<Object> flags = materializer.FieldAt(value_index);
- Handle<Object> last_index = materializer.FieldAt(value_index);
- object->set_raw_properties_or_hash(*properties);
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_data(*data);
- object->set_source(*source);
- object->set_flags(*flags);
- object->set_last_index(*last_index);
- return object;
- }
- case STRING_TYPE:
- case ONE_BYTE_STRING_TYPE:
- case CONS_ONE_BYTE_STRING_TYPE:
- case SLICED_STRING_TYPE:
- case SLICED_ONE_BYTE_STRING_TYPE:
- case EXTERNAL_STRING_TYPE:
- case EXTERNAL_ONE_BYTE_STRING_TYPE:
- case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case SHORT_EXTERNAL_STRING_TYPE:
- case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
- case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case THIN_STRING_TYPE:
- case THIN_ONE_BYTE_STRING_TYPE:
- case INTERNALIZED_STRING_TYPE:
- case ONE_BYTE_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
- case SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
- case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case SYMBOL_TYPE:
- case ODDBALL_TYPE:
- case JS_GLOBAL_OBJECT_TYPE:
- case JS_GLOBAL_PROXY_TYPE:
- case JS_API_OBJECT_TYPE:
- case JS_SPECIAL_API_OBJECT_TYPE:
- case JS_VALUE_TYPE:
- case JS_MESSAGE_OBJECT_TYPE:
- case JS_DATE_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_MODULE_NAMESPACE_TYPE:
- case JS_ARRAY_BUFFER_TYPE:
- case JS_TYPED_ARRAY_TYPE:
- case JS_DATA_VIEW_TYPE:
- case JS_SET_TYPE:
- case JS_MAP_TYPE:
- case JS_WEAK_MAP_TYPE:
- case JS_WEAK_SET_TYPE:
- case JS_PROMISE_TYPE:
- case JS_PROXY_TYPE:
- case MAP_TYPE:
- case ALLOCATION_SITE_TYPE:
- case ACCESSOR_INFO_TYPE:
- case SHARED_FUNCTION_INFO_TYPE:
- case FUNCTION_TEMPLATE_INFO_TYPE:
- case ACCESSOR_PAIR_TYPE:
- case BYTE_ARRAY_TYPE:
- case BYTECODE_ARRAY_TYPE:
- case DESCRIPTOR_ARRAY_TYPE:
- case TRANSITION_ARRAY_TYPE:
- case FEEDBACK_VECTOR_TYPE:
- case FOREIGN_TYPE:
- case SCRIPT_TYPE:
- case CODE_TYPE:
- case PROPERTY_CELL_TYPE:
- case BIGINT_TYPE:
- case MODULE_TYPE:
- case MODULE_INFO_ENTRY_TYPE:
- case FREE_SPACE_TYPE:
-#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case FIXED_##TYPE##_ARRAY_TYPE:
- TYPED_ARRAYS(FIXED_TYPED_ARRAY_CASE)
-#undef FIXED_TYPED_ARRAY_CASE
- case FILLER_TYPE:
- case ACCESS_CHECK_INFO_TYPE:
- case INTERCEPTOR_INFO_TYPE:
- case OBJECT_TEMPLATE_INFO_TYPE:
- case ALLOCATION_MEMENTO_TYPE:
- case ALIASED_ARGUMENTS_ENTRY_TYPE:
- case PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE:
- case PROMISE_REACTION_JOB_INFO_TYPE:
- case DEBUG_INFO_TYPE:
- case STACK_FRAME_INFO_TYPE:
- case CELL_TYPE:
- case WEAK_CELL_TYPE:
- case SMALL_ORDERED_HASH_MAP_TYPE:
- case SMALL_ORDERED_HASH_SET_TYPE:
- case CODE_DATA_CONTAINER_TYPE:
- case PROTOTYPE_INFO_TYPE:
- case TUPLE2_TYPE:
- case TUPLE3_TYPE:
- case ASYNC_GENERATOR_REQUEST_TYPE:
- case WASM_MODULE_TYPE:
- case WASM_INSTANCE_TYPE:
- case WASM_MEMORY_TYPE:
- case WASM_TABLE_TYPE:
- OFStream os(stderr);
- os << "[couldn't handle instance type " << map->instance_type() << "]"
- << std::endl;
- UNREACHABLE();
- break;
+ // Make sure all the remaining children (after the map and properties) are
+ // allocated.
+ return EnsureChildrenAllocated(slot->GetChildrenCount() - 2, frame,
+ &value_index, worklist);
}
UNREACHABLE();
}
-Handle<Object> TranslatedState::MaterializeAt(int frame_index,
- int* value_index) {
- CHECK_LT(static_cast<size_t>(frame_index), frames().size());
- TranslatedFrame* frame = &(frames_[frame_index]);
- CHECK_LT(static_cast<size_t>(*value_index), frame->values_.size());
+void TranslatedState::EnsureChildrenAllocated(int count, TranslatedFrame* frame,
+ int* value_index,
+ std::stack<int>* worklist) {
+ // Ensure all children are allocated.
+ for (int i = 0; i < count; i++) {
+ // If the field is an object that has not been allocated yet, queue it
+ // for initialization (and mark it as such).
+ TranslatedValue* child_slot = frame->ValueAt(*value_index);
+ if (child_slot->kind() == TranslatedValue::kCapturedObject ||
+ child_slot->kind() == TranslatedValue::kDuplicatedObject) {
+ child_slot = ResolveCapturedObject(child_slot);
+ if (child_slot->materialization_state() ==
+ TranslatedValue::kUninitialized) {
+ worklist->push(child_slot->object_index());
+ child_slot->mark_allocated();
+ }
+ } else {
+ // Make sure the simple values (heap numbers, etc.) are properly
+ // initialized.
+ child_slot->MaterializeSimple();
+ }
+ SkipSlots(1, frame, value_index);
+ }
+}
- TranslatedValue* slot = &(frame->values_[*value_index]);
- (*value_index)++;
+void TranslatedState::EnsurePropertiesAllocatedAndMarked(
+ TranslatedValue* properties_slot, Handle<Map> map) {
+ CHECK_EQ(TranslatedValue::kUninitialized,
+ properties_slot->materialization_state());
- switch (slot->kind()) {
- case TranslatedValue::kTagged:
- case TranslatedValue::kInt32:
- case TranslatedValue::kUInt32:
- case TranslatedValue::kBoolBit:
- case TranslatedValue::kFloat:
- case TranslatedValue::kDouble: {
- slot->MaterializeSimple();
- Handle<Object> value = slot->GetValue();
- if (value->IsMutableHeapNumber()) {
- HeapNumber::cast(*value)->set_map(isolate()->heap()->heap_number_map());
- }
- return value;
+ Handle<ByteArray> object_storage = AllocateStorageFor(properties_slot);
+ properties_slot->mark_allocated();
+ properties_slot->set_storage(object_storage);
+
+ // Set markers for the double properties.
+ Handle<DescriptorArray> descriptors(map->instance_descriptors());
+ int field_count = map->NumberOfOwnDescriptors();
+ for (int i = 0; i < field_count; i++) {
+ FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ if (descriptors->GetDetails(i).representation().IsDouble() &&
+ !index.is_inobject()) {
+ CHECK(!map->IsUnboxedDoubleField(index));
+ int outobject_index = index.outobject_array_index();
+ int array_index = outobject_index * kPointerSize;
+ object_storage->set(array_index, kStoreMutableHeapNumber);
}
+ }
+}
+
+Handle<ByteArray> TranslatedState::AllocateStorageFor(TranslatedValue* slot) {
+ int allocate_size =
+ ByteArray::LengthFor(slot->GetChildrenCount() * kPointerSize);
+ // It is important to allocate all the objects tenured so that the marker
+ // does not visit them.
+ Handle<ByteArray> object_storage =
+ isolate()->factory()->NewByteArray(allocate_size, TENURED);
+ for (int i = 0; i < object_storage->length(); i++) {
+ object_storage->set(i, kStoreTagged);
+ }
+ return object_storage;
+}
- case TranslatedValue::kCapturedObject: {
- // The map must be a tagged object.
- CHECK_EQ(frame->values_[*value_index].kind(), TranslatedValue::kTagged);
- CHECK(frame->values_[*value_index].GetValue()->IsMap());
- return MaterializeCapturedObjectAt(slot, frame_index, value_index);
+void TranslatedState::EnsureJSObjectAllocated(TranslatedValue* slot,
+ Handle<Map> map) {
+ CHECK_EQ(map->instance_size(), slot->GetChildrenCount() * kPointerSize);
+
+ Handle<ByteArray> object_storage = AllocateStorageFor(slot);
+ // Now we handle the interesting (JSObject) case.
+ Handle<DescriptorArray> descriptors(map->instance_descriptors());
+ int field_count = map->NumberOfOwnDescriptors();
+
+ // Set markers for the double properties.
+ for (int i = 0; i < field_count; i++) {
+ FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ if (descriptors->GetDetails(i).representation().IsDouble() &&
+ index.is_inobject()) {
+ CHECK_GE(index.index(), FixedArray::kHeaderSize / kPointerSize);
+ int array_index = index.index() * kPointerSize - FixedArray::kHeaderSize;
+ uint8_t marker = map->IsUnboxedDoubleField(index)
+ ? kStoreUnboxedDouble
+ : kStoreMutableHeapNumber;
+ object_storage->set(array_index, marker);
}
- case TranslatedValue::kDuplicatedObject: {
- int object_index = slot->object_index();
- TranslatedState::ObjectPosition pos = object_positions_[object_index];
+ }
+ slot->set_storage(object_storage);
+}
+
+Handle<Object> TranslatedState::GetValueAndAdvance(TranslatedFrame* frame,
+ int* value_index) {
+ TranslatedValue* slot = frame->ValueAt(*value_index);
+ SkipSlots(1, frame, value_index);
+ if (slot->kind() == TranslatedValue::kDuplicatedObject) {
+ slot = ResolveCapturedObject(slot);
+ }
+ CHECK_NE(TranslatedValue::kUninitialized, slot->materialization_state());
+ return slot->GetStorage();
+}
- // Make sure the duplicate is referring to a previous object.
- CHECK(pos.frame_index_ < frame_index ||
- (pos.frame_index_ == frame_index &&
- pos.value_index_ < *value_index - 1));
+void TranslatedState::InitializeJSObjectAt(
+ TranslatedFrame* frame, int* value_index, TranslatedValue* slot,
+ Handle<Map> map, const DisallowHeapAllocation& no_allocation) {
+ Handle<HeapObject> object_storage = Handle<HeapObject>::cast(slot->storage_);
+ DCHECK_EQ(TranslatedValue::kCapturedObject, slot->kind());
- Handle<Object> object =
- frames_[pos.frame_index_].values_[pos.value_index_].GetValue();
+ // The object should have at least a map and some payload.
+ CHECK_GE(slot->GetChildrenCount(), 2);
- // The object should have a (non-sentinel) value.
- CHECK(!object.is_null() &&
- !object.is_identical_to(isolate_->factory()->arguments_marker()));
+ // Notify the concurrent marker about the layout change.
+ isolate()->heap()->NotifyObjectLayoutChange(
+ *object_storage, slot->GetChildrenCount() * kPointerSize, no_allocation);
- slot->value_ = object;
- return object;
+ // Fill the property array field.
+ {
+ Handle<Object> properties = GetValueAndAdvance(frame, value_index);
+ WRITE_FIELD(*object_storage, JSObject::kPropertiesOrHashOffset,
+ *properties);
+ WRITE_BARRIER(isolate()->heap(), *object_storage,
+ JSObject::kPropertiesOrHashOffset, *properties);
+ }
+
+ // For all the other fields we first look at the fixed array and check the
+ // marker to see if we store an unboxed double.
+ DCHECK_EQ(kPointerSize, JSObject::kPropertiesOrHashOffset);
+ for (int i = 2; i < slot->GetChildrenCount(); i++) {
+ // Initialize and extract the value from its slot.
+ Handle<Object> field_value = GetValueAndAdvance(frame, value_index);
+
+ // Read out the marker and ensure the field is consistent with
+ // what the markers in the storage say (note that all heap numbers
+ // should be fully initialized by now).
+ int offset = i * kPointerSize;
+ uint8_t marker = READ_UINT8_FIELD(*object_storage, offset);
+ if (marker == kStoreUnboxedDouble) {
+ double double_field_value;
+ if (field_value->IsSmi()) {
+ double_field_value = Smi::cast(*field_value)->value();
+ } else {
+ CHECK(field_value->IsHeapNumber());
+ double_field_value = HeapNumber::cast(*field_value)->value();
+ }
+ WRITE_DOUBLE_FIELD(*object_storage, offset, double_field_value);
+ } else if (marker == kStoreMutableHeapNumber) {
+ CHECK(field_value->IsMutableHeapNumber());
+ WRITE_FIELD(*object_storage, offset, *field_value);
+ WRITE_BARRIER(isolate()->heap(), *object_storage, offset, *field_value);
+ } else {
+ CHECK_EQ(kStoreTagged, marker);
+ WRITE_FIELD(*object_storage, offset, *field_value);
+ WRITE_BARRIER(isolate()->heap(), *object_storage, offset, *field_value);
}
+ }
+ object_storage->synchronized_set_map(*map);
+}
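
The byte array that pre-allocates an object's storage doubles as a per-field tag: every byte starts as kStoreTagged, the allocation pass marks fields that must receive a raw double or a mutable heap number, and the initialization pass above dispatches on that marker. A simplified standalone sketch of the scheme (sizes and types are assumptions):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

constexpr int kPointerSize = 8;

enum DoubleStorageKind : uint8_t {
  kStoreTagged,
  kStoreUnboxedDouble,
  kStoreMutableHeapNumber,
};

int main() {
  const int field_count = 3;
  // "Allocation" pass: storage is pre-filled with kStoreTagged markers.
  std::vector<uint8_t> storage(field_count * kPointerSize, kStoreTagged);
  storage[1 * kPointerSize] = kStoreUnboxedDouble;  // field 1 holds a raw double

  // "Initialization" pass: check the marker before writing each field.
  for (int i = 0; i < field_count; ++i) {
    int offset = i * kPointerSize;
    switch (storage[offset]) {
      case kStoreUnboxedDouble: {
        double value = 1.5;  // write the raw double bits into the field
        std::memcpy(&storage[offset], &value, sizeof(value));
        break;
      }
      case kStoreMutableHeapNumber:
      case kStoreTagged:
        // In the real code a tagged pointer (plus write barrier) goes here.
        break;
    }
  }

  double read_back;
  std::memcpy(&read_back, &storage[1 * kPointerSize], sizeof(read_back));
  assert(read_back == 1.5);
}
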
- case TranslatedValue::kInvalid:
- UNREACHABLE();
- break;
+void TranslatedState::InitializeObjectWithTaggedFieldsAt(
+ TranslatedFrame* frame, int* value_index, TranslatedValue* slot,
+ Handle<Map> map, const DisallowHeapAllocation& no_allocation) {
+ Handle<HeapObject> object_storage = Handle<HeapObject>::cast(slot->storage_);
+
+ // Skip the writes if we already have the canonical empty fixed array.
+ if (*object_storage == isolate()->heap()->empty_fixed_array()) {
+ CHECK_EQ(2, slot->GetChildrenCount());
+ Handle<Object> length_value = GetValueAndAdvance(frame, value_index);
+ CHECK_EQ(*length_value, Smi::FromInt(0));
+ return;
}
- FATAL("We should never get here - unexpected deopt slot kind.");
- return Handle<Object>::null();
+ // Notify the concurrent marker about the layout change.
+ isolate()->heap()->NotifyObjectLayoutChange(
+ *object_storage, slot->GetChildrenCount() * kPointerSize, no_allocation);
+
+ // Write the fields to the object.
+ for (int i = 1; i < slot->GetChildrenCount(); i++) {
+ Handle<Object> field_value = GetValueAndAdvance(frame, value_index);
+ int offset = i * kPointerSize;
+ uint8_t marker = READ_UINT8_FIELD(*object_storage, offset);
+ if (i > 1 && marker == kStoreMutableHeapNumber) {
+ CHECK(field_value->IsMutableHeapNumber());
+ } else {
+ CHECK(marker == kStoreTagged || i == 1);
+ CHECK(!field_value->IsMutableHeapNumber());
+ }
+
+ WRITE_FIELD(*object_storage, offset, *field_value);
+ WRITE_BARRIER(isolate()->heap(), *object_storage, offset, *field_value);
+ }
+
+ object_storage->synchronized_set_map(*map);
}
-Handle<Object> TranslatedState::MaterializeObjectAt(int object_index) {
- CHECK_LT(static_cast<size_t>(object_index), object_positions_.size());
- TranslatedState::ObjectPosition pos = object_positions_[object_index];
- return MaterializeAt(pos.frame_index_, &(pos.value_index_));
+TranslatedValue* TranslatedState::ResolveCapturedObject(TranslatedValue* slot) {
+ while (slot->kind() == TranslatedValue::kDuplicatedObject) {
+ slot = GetValueByObjectIndex(slot->object_index());
+ }
+ CHECK_EQ(TranslatedValue::kCapturedObject, slot->kind());
+ return slot;
}
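
ResolveCapturedObject only has to chase object ids: a kDuplicatedObject record stores the id of an earlier object, so resolution follows those links until it reaches the canonical kCapturedObject entry. A tiny illustrative sketch:

#include <cassert>
#include <vector>

struct Slot {
  bool is_duplicate;
  int object_index;  // for duplicates: the id they alias
};

const Slot* Resolve(const std::vector<Slot>& by_id, const Slot* slot) {
  while (slot->is_duplicate) slot = &by_id[slot->object_index];
  return slot;
}

int main() {
  // Object 0 is canonical; objects 1 and 2 are duplicates of it.
  std::vector<Slot> by_id = {{false, 0}, {true, 0}, {true, 1}};
  assert(Resolve(by_id, &by_id[2]) == &by_id[0]);
}
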
TranslatedFrame* TranslatedState::GetFrameFromJSFrameIndex(int jsframe_index) {
@@ -3818,7 +3812,7 @@ void TranslatedState::StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame) {
bool new_store = false;
if (previously_materialized_objects.is_null()) {
previously_materialized_objects =
- isolate_->factory()->NewFixedArray(length);
+ isolate_->factory()->NewFixedArray(length, TENURED);
for (int i = 0; i < length; i++) {
previously_materialized_objects->set(i, *marker);
}
@@ -3835,6 +3829,10 @@ void TranslatedState::StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame) {
CHECK(value_info->IsMaterializedObject());
+ // Skip duplicate objects (i.e., those that point to some
+ // other object id).
+ if (value_info->object_index() != i) continue;
+
Handle<Object> value(value_info->GetRawValue(), isolate_);
if (!value.is_identical_to(marker)) {
@@ -3878,11 +3876,57 @@ void TranslatedState::UpdateFromPreviouslyMaterializedObjects() {
&(frames_[pos.frame_index_].values_[pos.value_index_]);
CHECK(value_info->IsMaterializedObject());
- value_info->value_ =
- Handle<Object>(previously_materialized_objects->get(i), isolate_);
+ if (value_info->kind() == TranslatedValue::kCapturedObject) {
+ value_info->set_initialized_storage(
+ Handle<Object>(previously_materialized_objects->get(i), isolate_));
+ }
+ }
+ }
+}
+
+void TranslatedState::VerifyMaterializedObjects() {
+#if VERIFY_HEAP
+ int length = static_cast<int>(object_positions_.size());
+ for (int i = 0; i < length; i++) {
+ TranslatedValue* slot = GetValueByObjectIndex(i);
+ if (slot->kind() == TranslatedValue::kCapturedObject) {
+ CHECK_EQ(slot, GetValueByObjectIndex(slot->object_index()));
+ if (slot->materialization_state() == TranslatedValue::kFinished) {
+ slot->GetStorage()->ObjectVerify();
+ } else {
+ CHECK_EQ(slot->materialization_state(),
+ TranslatedValue::kUninitialized);
+ }
}
}
+#endif
+}
+
+bool TranslatedState::DoUpdateFeedback() {
+ if (!feedback_vector_handle_.is_null()) {
+ CHECK(!feedback_slot_.IsInvalid());
+ isolate()->CountUsage(v8::Isolate::kDeoptimizerDisableSpeculation);
+ CallICNexus nexus(feedback_vector_handle_, feedback_slot_);
+ nexus.SetSpeculationMode(SpeculationMode::kDisallowSpeculation);
+ return true;
+ }
+ return false;
+}
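
DoUpdateFeedback exists to break deopt loops: after a speculative call-site assumption has failed, the corresponding feedback slot is switched to disallow speculation so the next optimization pass will not make the same bet. A minimal sketch of the idea (illustrative types, not the real CallICNexus API):

#include <cassert>

enum class SpeculationMode { kAllowSpeculation, kDisallowSpeculation };

struct FeedbackSlot {
  SpeculationMode mode = SpeculationMode::kAllowSpeculation;
};

bool ShouldSpeculateOn(const FeedbackSlot& slot) {
  return slot.mode == SpeculationMode::kAllowSpeculation;
}

int main() {
  FeedbackSlot slot;
  assert(ShouldSpeculateOn(slot));   // first compile: speculate on the call
  // ... speculation fails at runtime, the deoptimizer runs ...
  slot.mode = SpeculationMode::kDisallowSpeculation;
  assert(!ShouldSpeculateOn(slot));  // recompile: no speculation this time
}
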
+
+void TranslatedState::ReadUpdateFeedback(TranslationIterator* iterator,
+ FixedArray* literal_array,
+ FILE* trace_file) {
+ CHECK_EQ(Translation::UPDATE_FEEDBACK, iterator->Next());
+ feedback_vector_ = FeedbackVector::cast(literal_array->get(iterator->Next()));
+ feedback_slot_ = FeedbackSlot(iterator->Next());
+ if (trace_file != nullptr) {
+ PrintF(trace_file, " reading FeedbackVector (slot %d)\n",
+ feedback_slot_.ToInt());
+ }
}
} // namespace internal
} // namespace v8
+
+// Undefine the heap manipulation macros.
+#include "src/objects/object-macros-undef.h"
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index b8ab648b9c..e72878654d 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -5,6 +5,7 @@
#ifndef V8_DEOPTIMIZER_H_
#define V8_DEOPTIMIZER_H_
+#include <stack>
#include <vector>
#include "src/allocation.h"
@@ -31,6 +32,9 @@ class TranslatedValue {
// Returns heap()->arguments_marker() if allocation would be
// necessary to get the value.
Object* GetRawValue() const;
+
+ // Getter for the value, takes care of materializing the subgraph
+ // reachable from this value.
Handle<Object> GetValue();
bool IsMaterializedObject() const;
@@ -40,7 +44,7 @@ class TranslatedValue {
friend class TranslatedState;
friend class TranslatedFrame;
- enum Kind {
+ enum Kind : uint8_t {
kInvalid,
kTagged,
kInt32,
@@ -56,9 +60,20 @@ class TranslatedValue {
kDuplicatedObject // Duplicated object of a deferred object.
};
+ enum MaterializationState : uint8_t {
+ kUninitialized,
+ kAllocated, // Storage for the object has been allocated (or
+ // enqueued for allocation).
+ kFinished, // The object has been initialized (or enqueued for
+ // initialization).
+ };
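
The three states form a simple lifecycle: simple values jump straight from kUninitialized to kFinished via set_initialized_storage, while captured objects pass through kAllocated once their backing store has been reserved. A small illustrative sketch (the transition checks are assumptions, not the real DCHECKs):

#include <cassert>
#include <cstdint>

enum MaterializationState : uint8_t { kUninitialized, kAllocated, kFinished };

struct Value {
  MaterializationState state = kUninitialized;
  void mark_allocated() { assert(state == kUninitialized); state = kAllocated; }
  void mark_finished() { assert(state != kFinished); state = kFinished; }
};

int main() {
  Value simple;                // e.g. a heap number
  simple.mark_finished();      // materialized in one step

  Value captured;              // e.g. a captured JSObject
  captured.mark_allocated();   // phase 1: storage reserved
  captured.mark_finished();    // phase 2: fields initialized
}
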
+
TranslatedValue(TranslatedState* container, Kind kind)
: kind_(kind), container_(container) {}
Kind kind() const { return kind_; }
+ MaterializationState materialization_state() const {
+ return materialization_state_;
+ }
void Handlify();
int GetChildrenCount() const;
@@ -76,15 +91,25 @@ class TranslatedValue {
Isolate* isolate() const;
void MaterializeSimple();
+ void set_storage(Handle<HeapObject> storage) { storage_ = storage; }
+ void set_initialized_storage(Handle<Object> storage);
+ void mark_finished() { materialization_state_ = kFinished; }
+ void mark_allocated() { materialization_state_ = kAllocated; }
+
+ Handle<Object> GetStorage() {
+ DCHECK_NE(kUninitialized, materialization_state());
+ return storage_;
+ }
+
Kind kind_;
+ MaterializationState materialization_state_ = kUninitialized;
TranslatedState* container_; // This is only needed for materialization of
// objects and constructing handles (to get
// to the isolate).
- MaybeHandle<Object> value_; // Before handlification, this is always null,
- // after materialization it is never null,
- // in between it is only null if the value needs
- // to be materialized.
+ Handle<Object> storage_; // Contains the materialized value or the
+ // byte-array that will be later morphed into
+ // the materialized object.
struct MaterializedObjectInfo {
int id_;
@@ -211,6 +236,7 @@ class TranslatedFrame {
height_(height) {}
void Add(const TranslatedValue& value) { values_.push_back(value); }
+ TranslatedValue* ValueAt(int index) { return &(values_[index]); }
void Handlify();
Kind kind_;
@@ -242,7 +268,7 @@ class TranslatedFrame {
class TranslatedState {
public:
- TranslatedState();
+ TranslatedState() {}
explicit TranslatedState(const JavaScriptFrame* frame);
void Prepare(Address stack_frame_pointer);
@@ -270,6 +296,9 @@ class TranslatedState {
FixedArray* literal_array, RegisterValues* registers,
FILE* trace_file, int parameter_count);
+ void VerifyMaterializedObjects();
+ bool DoUpdateFeedback();
+
private:
friend TranslatedValue;
@@ -288,19 +317,47 @@ class TranslatedState {
FILE* trace_file);
void UpdateFromPreviouslyMaterializedObjects();
- Handle<Object> MaterializeAt(int frame_index, int* value_index);
- Handle<Object> MaterializeObjectAt(int object_index);
- class CapturedObjectMaterializer;
- Handle<Object> MaterializeCapturedObjectAt(TranslatedValue* slot,
- int frame_index, int* value_index);
+ void MaterializeFixedDoubleArray(TranslatedFrame* frame, int* value_index,
+ TranslatedValue* slot, Handle<Map> map);
+ void MaterializeMutableHeapNumber(TranslatedFrame* frame, int* value_index,
+ TranslatedValue* slot);
+
+ void EnsureObjectAllocatedAt(TranslatedValue* slot);
+
+ void SkipSlots(int slots_to_skip, TranslatedFrame* frame, int* value_index);
+
+ Handle<ByteArray> AllocateStorageFor(TranslatedValue* slot);
+ void EnsureJSObjectAllocated(TranslatedValue* slot, Handle<Map> map);
+ void EnsurePropertiesAllocatedAndMarked(TranslatedValue* properties_slot,
+ Handle<Map> map);
+ void EnsureChildrenAllocated(int count, TranslatedFrame* frame,
+ int* value_index, std::stack<int>* worklist);
+ void EnsureCapturedObjectAllocatedAt(int object_index,
+ std::stack<int>* worklist);
+ Handle<Object> InitializeObjectAt(TranslatedValue* slot);
+ void InitializeCapturedObjectAt(int object_index, std::stack<int>* worklist,
+ const DisallowHeapAllocation& no_allocation);
+ void InitializeJSObjectAt(TranslatedFrame* frame, int* value_index,
+ TranslatedValue* slot, Handle<Map> map,
+ const DisallowHeapAllocation& no_allocation);
+ void InitializeObjectWithTaggedFieldsAt(
+ TranslatedFrame* frame, int* value_index, TranslatedValue* slot,
+ Handle<Map> map, const DisallowHeapAllocation& no_allocation);
+
+ void ReadUpdateFeedback(TranslationIterator* iterator,
+ FixedArray* literal_array, FILE* trace_file);
+
+ TranslatedValue* ResolveCapturedObject(TranslatedValue* slot);
+ TranslatedValue* GetValueByObjectIndex(int object_index);
+ Handle<Object> GetValueAndAdvance(TranslatedFrame* frame, int* value_index);
static uint32_t GetUInt32Slot(Address fp, int slot_index);
static Float32 GetFloatSlot(Address fp, int slot_index);
static Float64 GetDoubleSlot(Address fp, int slot_index);
std::vector<TranslatedFrame> frames_;
- Isolate* isolate_;
- Address stack_frame_pointer_;
+ Isolate* isolate_ = nullptr;
+ Address stack_frame_pointer_ = nullptr;
int formal_parameter_count_;
struct ObjectPosition {
@@ -308,6 +365,9 @@ class TranslatedState {
int value_index_;
};
std::deque<ObjectPosition> object_positions_;
+ Handle<FeedbackVector> feedback_vector_handle_;
+ FeedbackVector* feedback_vector_ = nullptr;
+ FeedbackSlot feedback_slot_;
};
@@ -366,7 +426,6 @@ class Deoptimizer : public Malloced {
Handle<JSFunction> function() const;
Handle<Code> compiled_code() const;
BailoutType bailout_type() const { return bailout_type_; }
- bool preserve_optimized() const { return preserve_optimized_; }
// Number of created JS frames. Not all created frames are necessarily JS.
int jsframe_count() const { return jsframe_count_; }
@@ -488,7 +547,6 @@ class Deoptimizer : public Malloced {
unsigned ComputeInputFrameAboveFpFixedSize() const;
unsigned ComputeInputFrameSize() const;
- static unsigned ComputeJavascriptFixedSize(SharedFunctionInfo* shared);
static unsigned ComputeInterpretedFixedSize(SharedFunctionInfo* shared);
static unsigned ComputeIncomingArgumentSize(SharedFunctionInfo* shared);
@@ -517,7 +575,6 @@ class Deoptimizer : public Malloced {
Code* compiled_code_;
unsigned bailout_id_;
BailoutType bailout_type_;
- bool preserve_optimized_;
Address from_;
int fp_to_sp_delta_;
bool deoptimizing_throw_;
@@ -642,9 +699,15 @@ class FrameDescription {
return *GetFrameSlotPointer(offset);
}
+ unsigned GetLastArgumentSlotOffset() {
+ int parameter_slots = parameter_count();
+ if (kPadArguments) parameter_slots = RoundUp(parameter_slots, 2);
+ return GetFrameSize() - parameter_slots * kPointerSize;
+ }
+
Address GetFramePointerAddress() {
- int fp_offset = GetFrameSize() - parameter_count() * kPointerSize -
- StandardFrameConstants::kCallerSPOffset;
+ int fp_offset =
+ GetLastArgumentSlotOffset() - StandardFrameConstants::kCallerSPOffset;
return reinterpret_cast<Address>(GetFrameSlotPointer(fp_offset));
}
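
GetLastArgumentSlotOffset folds the same even-slot padding into the frame layout: the argument area is measured from the frame's end, and the saved frame pointer sits a fixed distance (kCallerSPOffset) below the last argument slot. A worked standalone sketch with made-up numbers:

#include <cstdio>

constexpr int kPointerSize = 8;
constexpr bool kPadArguments = true;
constexpr int kCallerSPOffset = 2 * kPointerSize;  // illustrative constant

int RoundUp(int value, int multiple) {
  return ((value + multiple - 1) / multiple) * multiple;
}

int LastArgumentSlotOffset(int frame_size, int parameter_count) {
  int parameter_slots = parameter_count;
  if (kPadArguments) parameter_slots = RoundUp(parameter_slots, 2);
  return frame_size - parameter_slots * kPointerSize;
}

int main() {
  int frame_size = 96;      // bytes
  int parameter_count = 3;  // rounds up to 4 slots = 32 bytes of arguments
  int last_arg = LastArgumentSlotOffset(frame_size, parameter_count);
  int fp_offset = last_arg - kCallerSPOffset;
  std::printf("last arg slot at %d, fp at %d\n", last_arg, fp_offset);  // 64, 48
}
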
@@ -826,7 +889,8 @@ class TranslationIterator BASE_EMBEDDED {
V(BOOL_STACK_SLOT) \
V(FLOAT_STACK_SLOT) \
V(DOUBLE_STACK_SLOT) \
- V(LITERAL)
+ V(LITERAL) \
+ V(UPDATE_FEEDBACK)
class Translation BASE_EMBEDDED {
public:
@@ -838,13 +902,12 @@ class Translation BASE_EMBEDDED {
#undef DECLARE_TRANSLATION_OPCODE_ENUM
Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count,
- Zone* zone)
- : buffer_(buffer),
- index_(buffer->CurrentIndex()),
- zone_(zone) {
+ int update_feedback_count, Zone* zone)
+ : buffer_(buffer), index_(buffer->CurrentIndex()), zone_(zone) {
buffer_->Add(BEGIN);
buffer_->Add(frame_count);
buffer_->Add(jsframe_count);
+ buffer_->Add(update_feedback_count);
}
int index() const { return index_; }
@@ -862,6 +925,7 @@ class Translation BASE_EMBEDDED {
void ArgumentsElements(CreateArgumentsType type);
void ArgumentsLength(CreateArgumentsType type);
void BeginCapturedObject(int length);
+ void AddUpdateFeedback(int vector_literal, int slot);
void DuplicateObject(int object_index);
void StoreRegister(Register reg);
void StoreInt32Register(Register reg);
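[Editor's note: an illustrative, standalone sketch (not V8 code) of the argument-slot padding that GetLastArgumentSlotOffset introduces above. When the target requires the argument area to stay aligned (kPadArguments), the parameter slot count is rounded up to an even number before the offset is computed; the helper names, frame sizes and main() below are assumptions for demonstration only.]

#include <cstdint>
#include <iostream>

// Illustrative stand-in for V8's RoundUp helper.
constexpr uint32_t RoundUp(uint32_t value, uint32_t multiple) {
  return ((value + multiple - 1) / multiple) * multiple;
}

uint32_t LastArgumentSlotOffset(uint32_t frame_size, uint32_t parameter_count,
                                bool pad_arguments, uint32_t pointer_size) {
  uint32_t slots = parameter_count;
  if (pad_arguments) slots = RoundUp(slots, 2);  // keep the argument area even
  return frame_size - slots * pointer_size;
}

int main() {
  // With 3 parameters, padding reserves 4 slots instead of 3.
  std::cout << LastArgumentSlotOffset(128, 3, false, 8) << "\n";  // 104
  std::cout << LastArgumentSlotOffset(128, 3, true, 8) << "\n";   // 96
}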
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 603f0bbe03..a26517b432 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -17,6 +17,8 @@
#include "src/objects-inl.h"
#include "src/snapshot/serializer-common.h"
#include "src/string-stream.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-engine.h"
namespace v8 {
namespace internal {
@@ -37,21 +39,29 @@ class V8NameConverter: public disasm::NameConverter {
const char* V8NameConverter::NameOfAddress(byte* pc) const {
- const char* name =
- code_ == nullptr ? nullptr : code_->GetIsolate()->builtins()->Lookup(pc);
+ if (code_ != nullptr) {
+ Isolate* isolate = code_->GetIsolate();
+ const char* name = isolate->builtins()->Lookup(pc);
- if (name != nullptr) {
- SNPrintF(v8_buffer_, "%p (%s)", static_cast<void*>(pc), name);
- return v8_buffer_.start();
- }
+ if (name != nullptr) {
+ SNPrintF(v8_buffer_, "%p (%s)", static_cast<void*>(pc), name);
+ return v8_buffer_.start();
+ }
- if (code_ != nullptr) {
int offs = static_cast<int>(pc - code_->instruction_start());
// print as code offset, if it seems reasonable
if (0 <= offs && offs < code_->instruction_size()) {
SNPrintF(v8_buffer_, "%p <+0x%x>", static_cast<void*>(pc), offs);
return v8_buffer_.start();
}
+
+ wasm::WasmCode* wasm_code =
+ isolate->wasm_engine()->code_manager()->LookupCode(pc);
+ if (wasm_code != nullptr) {
+ SNPrintF(v8_buffer_, "%p (%s)", static_cast<void*>(pc),
+ GetWasmCodeKindAsString(wasm_code->kind()));
+ return v8_buffer_.start();
+ }
}
return disasm::NameConverter::NameOfAddress(pc);
@@ -155,7 +165,8 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
}
static int DecodeIt(Isolate* isolate, std::ostream* os,
- const V8NameConverter& converter, byte* begin, byte* end) {
+ const V8NameConverter& converter, byte* begin, byte* end,
+ void* current_pc) {
SealHandleScope shs(isolate);
DisallowHeapAllocation no_alloc;
ExternalReferenceEncoder ref_encoder(isolate);
@@ -232,6 +243,10 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
}
// Instruction address and instruction offset.
+ if (FLAG_log_colour && prev_pc == current_pc) {
+ // If this is the given "current" pc, make it yellow and bold.
+ out.AddFormatted("\033[33;1m");
+ }
out.AddFormatted("%p %4" V8PRIxPTRDIFF " ", static_cast<void*>(prev_pc),
prev_pc - begin);
@@ -269,6 +284,10 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
}
}
+ if (FLAG_log_colour && prev_pc == current_pc) {
+ out.AddFormatted("\033[m");
+ }
+
DumpBuffer(os, &out);
}
@@ -287,17 +306,16 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
return static_cast<int>(pc - begin);
}
-
int Disassembler::Decode(Isolate* isolate, std::ostream* os, byte* begin,
- byte* end, Code* code) {
+ byte* end, Code* code, void* current_pc) {
V8NameConverter v8NameConverter(code);
- return DecodeIt(isolate, os, v8NameConverter, begin, end);
+ return DecodeIt(isolate, os, v8NameConverter, begin, end, current_pc);
}
#else // ENABLE_DISASSEMBLER
int Disassembler::Decode(Isolate* isolate, std::ostream* os, byte* begin,
- byte* end, Code* code) {
+ byte* end, Code* code, void* current_pc) {
return 0;
}
diff --git a/deps/v8/src/disassembler.h b/deps/v8/src/disassembler.h
index c0df0e6586..51ed0bf196 100644
--- a/deps/v8/src/disassembler.h
+++ b/deps/v8/src/disassembler.h
@@ -17,7 +17,7 @@ class Disassembler : public AllStatic {
// instruction could be decoded.
// the code object is used for name resolution and may be null.
static int Decode(Isolate* isolate, std::ostream* os, byte* begin, byte* end,
- Code* code = nullptr);
+ Code* code = nullptr, void* current_pc = nullptr);
};
} // namespace internal
diff --git a/deps/v8/src/eh-frame.cc b/deps/v8/src/eh-frame.cc
index 3dbfa46507..f0e413cf33 100644
--- a/deps/v8/src/eh-frame.cc
+++ b/deps/v8/src/eh-frame.cc
@@ -367,7 +367,7 @@ void EhFrameWriter::GetEhFrame(CodeDesc* desc) {
void EhFrameWriter::WriteULeb128(uint32_t value) {
do {
- byte chunk = value & 0x7f;
+ byte chunk = value & 0x7F;
value >>= 7;
if (value != 0) chunk |= 0x80;
WriteByte(chunk);
@@ -378,7 +378,7 @@ void EhFrameWriter::WriteSLeb128(int32_t value) {
static const int kSignBitMask = 0x40;
bool done;
do {
- byte chunk = value & 0x7f;
+ byte chunk = value & 0x7F;
value >>= 7;
done = ((value == 0) && ((chunk & kSignBitMask) == 0)) ||
((value == -1) && ((chunk & kSignBitMask) != 0));
@@ -412,7 +412,7 @@ uint32_t EhFrameIterator::DecodeULeb128(const byte* encoded,
do {
DCHECK_LT(shift, 8 * static_cast<int>(sizeof(result)));
- result |= (*current & 0x7f) << shift;
+ result |= (*current & 0x7F) << shift;
shift += 7;
} while (*current++ >= 128);
@@ -434,7 +434,7 @@ int32_t EhFrameIterator::DecodeSLeb128(const byte* encoded, int* encoded_size) {
do {
chunk = *current++;
DCHECK_LT(shift, 8 * static_cast<int>(sizeof(result)));
- result |= (chunk & 0x7f) << shift;
+ result |= (chunk & 0x7F) << shift;
shift += 7;
} while (chunk >= 128);
@@ -478,7 +478,7 @@ void EhFrameDisassembler::DumpDwarfDirectives(std::ostream& stream, // NOLINT
byte bytecode = eh_frame_iterator.GetNextByte();
- if (((bytecode >> EhFrameConstants::kLocationMaskSize) & 0xff) ==
+ if (((bytecode >> EhFrameConstants::kLocationMaskSize) & 0xFF) ==
EhFrameConstants::kLocationTag) {
int value = (bytecode & EhFrameConstants::kLocationMask) *
EhFrameConstants::kCodeAlignmentFactor;
@@ -488,7 +488,7 @@ void EhFrameDisassembler::DumpDwarfDirectives(std::ostream& stream, // NOLINT
continue;
}
- if (((bytecode >> EhFrameConstants::kSavedRegisterMaskSize) & 0xff) ==
+ if (((bytecode >> EhFrameConstants::kSavedRegisterMaskSize) & 0xFF) ==
EhFrameConstants::kSavedRegisterTag) {
int32_t decoded_offset = eh_frame_iterator.GetNextULeb128();
stream << "| " << DwarfRegisterCodeToString(
@@ -499,7 +499,7 @@ void EhFrameDisassembler::DumpDwarfDirectives(std::ostream& stream, // NOLINT
continue;
}
- if (((bytecode >> EhFrameConstants::kFollowInitialRuleMaskSize) & 0xff) ==
+ if (((bytecode >> EhFrameConstants::kFollowInitialRuleMaskSize) & 0xFF) ==
EhFrameConstants::kFollowInitialRuleTag) {
stream << "| " << DwarfRegisterCodeToString(
bytecode & EhFrameConstants::kLocationMask)
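[Editor's note: for reference, a self-contained sketch of the ULEB128 scheme these eh-frame hunks touch (the change itself is only hex-literal casing): seven payload bits per byte, with bit 7 (0x80) as the continuation flag. Function names below are illustrative, not the EhFrameWriter API.]

#include <cassert>
#include <cstdint>
#include <vector>

std::vector<uint8_t> EncodeULeb128(uint32_t value) {
  std::vector<uint8_t> out;
  do {
    uint8_t chunk = value & 0x7F;
    value >>= 7;
    if (value != 0) chunk |= 0x80;  // more bytes follow
    out.push_back(chunk);
  } while (value != 0);
  return out;
}

uint32_t DecodeULeb128(const std::vector<uint8_t>& bytes) {
  uint32_t result = 0;
  int shift = 0;
  for (uint8_t chunk : bytes) {
    result |= static_cast<uint32_t>(chunk & 0x7F) << shift;
    shift += 7;
    if ((chunk & 0x80) == 0) break;  // continuation bit clear: last byte
  }
  return result;
}

int main() {
  // Classic DWARF example: 624485 encodes as 0xE5 0x8E 0x26.
  assert(EncodeULeb128(624485).size() == 3);
  assert(DecodeULeb128(EncodeULeb128(624485)) == 624485);
}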
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index 838fa47769..b03f9340f3 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -191,6 +191,43 @@ inline ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
return packed_kind;
}
+inline bool UnionElementsKindUptoPackedness(ElementsKind* a_out,
+ ElementsKind b) {
+ // Assert that the union of two ElementKinds can be computed via std::max.
+ static_assert(PACKED_SMI_ELEMENTS < HOLEY_SMI_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ static_assert(PACKED_ELEMENTS < HOLEY_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ static_assert(PACKED_DOUBLE_ELEMENTS < HOLEY_DOUBLE_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ ElementsKind a = *a_out;
+ switch (a) {
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_SMI_ELEMENTS:
+ if (b == PACKED_SMI_ELEMENTS || b == HOLEY_SMI_ELEMENTS) {
+ *a_out = std::max(a, b);
+ return true;
+ }
+ break;
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ if (b == PACKED_ELEMENTS || b == HOLEY_ELEMENTS) {
+ *a_out = std::max(a, b);
+ return true;
+ }
+ break;
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
+ if (b == PACKED_DOUBLE_ELEMENTS || b == HOLEY_DOUBLE_ELEMENTS) {
+ *a_out = std::max(a, b);
+ return true;
+ }
+ break;
+ default:
+ break;
+ }
+ return false;
+}
inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) {
DCHECK(IsSmiElementsKind(from_kind));
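[Editor's note: a reduced standalone sketch of the "union up to packedness" idea behind UnionElementsKindUptoPackedness. Within one representation family, HOLEY subsumes PACKED, so the union is std::max provided the enum orders PACKED before HOLEY; the two-family enum and its values below are assumptions, not V8's ElementsKind numbering.]

#include <algorithm>
#include <iostream>

enum Kind { PACKED_SMI = 0, HOLEY_SMI = 1, PACKED = 2, HOLEY = 3 };

bool UnionUptoPackedness(Kind* a, Kind b) {
  // Only kinds sharing the same representation family can be merged.
  bool same_family = (*a <= HOLEY_SMI) == (b <= HOLEY_SMI);
  if (!same_family) return false;
  *a = std::max(*a, b);  // HOLEY wins over PACKED within a family
  return true;
}

int main() {
  Kind k = PACKED_SMI;
  std::cout << UnionUptoPackedness(&k, HOLEY_SMI) << " " << k << "\n";  // 1 1
  std::cout << UnionUptoPackedness(&k, PACKED) << "\n";                 // 0
}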
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 22bf8012dd..499af83078 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -12,6 +12,7 @@
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/utils.h"
+#include "src/zone/zone.h"
// Each concrete ElementsAccessor can handle exactly one ElementsKind,
// several abstract ElementsAccessor classes are used to allow sharing
@@ -519,6 +520,21 @@ static Maybe<int64_t> IndexOfValueSlowPath(Isolate* isolate,
return Just<int64_t>(-1);
}
+// The InternalElementsAccessor is a helper class to expose otherwise protected
+// methods to its subclasses. Namely, we don't want to publicly expose methods
+// that take an entry (instead of an index) as an argument.
+class InternalElementsAccessor : public ElementsAccessor {
+ public:
+ explicit InternalElementsAccessor(const char* name)
+ : ElementsAccessor(name) {}
+
+ virtual uint32_t GetEntryForIndex(Isolate* isolate, JSObject* holder,
+ FixedArrayBase* backing_store,
+ uint32_t index) = 0;
+
+ virtual PropertyDetails GetDetails(JSObject* holder, uint32_t entry) = 0;
+};
+
// Base class for element handler implementations. Contains the
 // common logic for objects with different ElementsKinds.
// Subclasses must specialize method for which the element
@@ -537,10 +553,10 @@ static Maybe<int64_t> IndexOfValueSlowPath(Isolate* isolate,
// CRTP to guarantee aggressive compile time optimizations (i.e. inlining and
// specialization of SomeElementsAccessor methods).
template <typename Subclass, typename ElementsTraitsParam>
-class ElementsAccessorBase : public ElementsAccessor {
+class ElementsAccessorBase : public InternalElementsAccessor {
public:
explicit ElementsAccessorBase(const char* name)
- : ElementsAccessor(name) { }
+ : InternalElementsAccessor(name) {}
typedef ElementsTraitsParam ElementsTraits;
typedef typename ElementsTraitsParam::BackingStore BackingStore;
@@ -1052,35 +1068,65 @@ class ElementsAccessorBase : public ElementsAccessor {
Isolate* isolate, Handle<JSObject> object,
Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
PropertyFilter filter) {
- int count = 0;
+ DCHECK_EQ(*nof_items, 0);
KeyAccumulator accumulator(isolate, KeyCollectionMode::kOwnOnly,
ALL_PROPERTIES);
Subclass::CollectElementIndicesImpl(
object, handle(object->elements(), isolate), &accumulator);
Handle<FixedArray> keys = accumulator.GetKeys();
- for (int i = 0; i < keys->length(); ++i) {
+ int count = 0;
+ int i = 0;
+ ElementsKind original_elements_kind = object->GetElementsKind();
+
+ for (; i < keys->length(); ++i) {
Handle<Object> key(keys->get(i), isolate);
- Handle<Object> value;
uint32_t index;
if (!key->ToUint32(&index)) continue;
+ DCHECK_EQ(object->GetElementsKind(), original_elements_kind);
uint32_t entry = Subclass::GetEntryForIndexImpl(
isolate, *object, object->elements(), index, filter);
if (entry == kMaxUInt32) continue;
-
PropertyDetails details = Subclass::GetDetailsImpl(*object, entry);
+ Handle<Object> value;
if (details.kind() == kData) {
value = Subclass::GetImpl(isolate, object->elements(), entry);
} else {
+ // This might modify the elements and/or change the elements kind.
LookupIterator it(isolate, object, index, LookupIterator::OWN);
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, value, Object::GetProperty(&it), Nothing<bool>());
}
- if (get_entries) {
- value = MakeEntryPair(isolate, index, value);
+ if (get_entries) value = MakeEntryPair(isolate, index, value);
+ values_or_entries->set(count++, *value);
+ if (object->GetElementsKind() != original_elements_kind) break;
+ }
+
+ // Slow path caused by changes in elements kind during iteration.
+ for (; i < keys->length(); i++) {
+ Handle<Object> key(keys->get(i), isolate);
+ uint32_t index;
+ if (!key->ToUint32(&index)) continue;
+
+ if (filter & ONLY_ENUMERABLE) {
+ InternalElementsAccessor* accessor =
+ reinterpret_cast<InternalElementsAccessor*>(
+ object->GetElementsAccessor());
+ uint32_t entry = accessor->GetEntryForIndex(isolate, *object,
+ object->elements(), index);
+ if (entry == kMaxUInt32) continue;
+ PropertyDetails details = accessor->GetDetails(*object, entry);
+ if (!details.IsEnumerable()) continue;
}
+
+ Handle<Object> value;
+ LookupIterator it(isolate, object, index, LookupIterator::OWN);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, value, Object::GetProperty(&it),
+ Nothing<bool>());
+
+ if (get_entries) value = MakeEntryPair(isolate, index, value);
values_or_entries->set(count++, *value);
}
@@ -1710,12 +1756,14 @@ class DictionaryElementsAccessor
return result;
}
}
-
+ ElementsKind original_elements_kind = receiver->GetElementsKind();
+ USE(original_elements_kind);
Handle<NumberDictionary> dictionary(
NumberDictionary::cast(receiver->elements()), isolate);
// Iterate through entire range, as accessing elements out of order is
// observable
for (uint32_t k = start_from; k < length; ++k) {
+ DCHECK_EQ(receiver->GetElementsKind(), original_elements_kind);
int entry = dictionary->FindEntry(isolate, k);
if (entry == NumberDictionary::kNotFound) {
if (search_for_hole) return Just(true);
@@ -1780,15 +1828,16 @@ class DictionaryElementsAccessor
uint32_t start_from, uint32_t length) {
DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
+ ElementsKind original_elements_kind = receiver->GetElementsKind();
+ USE(original_elements_kind);
Handle<NumberDictionary> dictionary(
NumberDictionary::cast(receiver->elements()), isolate);
// Iterate through entire range, as accessing elements out of order is
// observable.
for (uint32_t k = start_from; k < length; ++k) {
+ DCHECK_EQ(receiver->GetElementsKind(), original_elements_kind);
int entry = dictionary->FindEntry(isolate, k);
- if (entry == NumberDictionary::kNotFound) {
- continue;
- }
+ if (entry == NumberDictionary::kNotFound) continue;
PropertyDetails details = GetDetailsImpl(*dictionary, entry);
switch (details.kind()) {
@@ -3195,13 +3244,16 @@ class TypedElementsAccessor
}
template <typename SourceTraits>
- static void CopyBetweenBackingStores(FixedTypedArrayBase* source,
+ static void CopyBetweenBackingStores(void* source_data_ptr,
BackingStore* dest, size_t length,
uint32_t offset) {
- FixedTypedArray<SourceTraits>* source_fta =
- FixedTypedArray<SourceTraits>::cast(source);
+ DisallowHeapAllocation no_gc;
for (uint32_t i = 0; i < length; i++) {
- typename SourceTraits::ElementType elem = source_fta->get_scalar(i);
+ // We use scalar accessors to avoid boxing/unboxing, so there are no
+ // allocations.
+ typename SourceTraits::ElementType elem =
+ FixedTypedArray<SourceTraits>::get_scalar_from_data_ptr(
+ source_data_ptr, i);
dest->set(offset + i, dest->from(elem));
}
}
@@ -3232,15 +3284,10 @@ class TypedElementsAccessor
bool both_are_simple = HasSimpleRepresentation(source_type) &&
HasSimpleRepresentation(destination_type);
- // We assume the source and destination don't overlap, even though they
- // can share the same buffer. This is always true for newly allocated
- // TypedArrays.
uint8_t* source_data = static_cast<uint8_t*>(source_elements->DataPtr());
uint8_t* dest_data = static_cast<uint8_t*>(destination_elements->DataPtr());
size_t source_byte_length = NumberToSize(source->byte_length());
size_t dest_byte_length = NumberToSize(destination->byte_length());
- CHECK(dest_data + dest_byte_length <= source_data ||
- source_data + source_byte_length <= dest_data);
// We can simply copy the backing store if the types are the same, or if
// we are converting e.g. Uint8 <-> Int8, as the binary representation
@@ -3248,16 +3295,25 @@ class TypedElementsAccessor
// which have special conversion operations.
if (same_type || (same_size && both_are_simple)) {
size_t element_size = source->element_size();
- std::memcpy(dest_data + offset * element_size, source_data,
- length * element_size);
+ std::memmove(dest_data + offset * element_size, source_data,
+ length * element_size);
} else {
- // We use scalar accessors below to avoid boxing/unboxing, so there are
- // no allocations.
+ Isolate* isolate = source->GetIsolate();
+ Zone zone(isolate->allocator(), ZONE_NAME);
+
+ // If the typedarrays are overlapped, clone the source.
+ if (dest_data + dest_byte_length > source_data &&
+ source_data + source_byte_length > dest_data) {
+ uint8_t* temp_data = zone.NewArray<uint8_t>(source_byte_length);
+ std::memcpy(temp_data, source_data, source_byte_length);
+ source_data = temp_data;
+ }
+
switch (source->GetElementsKind()) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
- CopyBetweenBackingStores<Type##ArrayTraits>( \
- source_elements, destination_elements, length, offset); \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ CopyBetweenBackingStores<Type##ArrayTraits>( \
+ source_data, destination_elements, length, offset); \
break;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
default:
@@ -3273,7 +3329,7 @@ class TypedElementsAccessor
DisallowHeapAllocation no_gc;
DisallowJavascriptExecution no_js(isolate);
-#if defined(DEBUG) || defined(ENABLE_SLOWFAST_SWITCH)
+#ifdef V8_ENABLE_FORCE_SLOW_PATH
if (isolate->force_slow_path()) return true;
#endif
@@ -3698,12 +3754,13 @@ class SloppyArgumentsElementsAccessor
Handle<Object> value,
uint32_t start_from, uint32_t length) {
DCHECK(JSObject::PrototypeHasNoElements(isolate, *object));
- Handle<Map> original_map = handle(object->map(), isolate);
+ Handle<Map> original_map(object->map(), isolate);
Handle<SloppyArgumentsElements> elements(
SloppyArgumentsElements::cast(object->elements()), isolate);
bool search_for_hole = value->IsUndefined(isolate);
for (uint32_t k = start_from; k < length; ++k) {
+ DCHECK_EQ(object->map(), *original_map);
uint32_t entry =
GetEntryForIndexImpl(isolate, *object, *elements, k, ALL_PROPERTIES);
if (entry == kMaxUInt32) {
@@ -3739,11 +3796,12 @@ class SloppyArgumentsElementsAccessor
Handle<Object> value,
uint32_t start_from, uint32_t length) {
DCHECK(JSObject::PrototypeHasNoElements(isolate, *object));
- Handle<Map> original_map = handle(object->map(), isolate);
+ Handle<Map> original_map(object->map(), isolate);
Handle<SloppyArgumentsElements> elements(
SloppyArgumentsElements::cast(object->elements()), isolate);
for (uint32_t k = start_from; k < length; ++k) {
+ DCHECK_EQ(object->map(), *original_map);
uint32_t entry =
GetEntryForIndexImpl(isolate, *object, *elements, k, ALL_PROPERTIES);
if (entry == kMaxUInt32) {
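[Editor's note: a standalone sketch of the overlap handling the CopyElements hunks introduce. Same-type copies can fall back to memmove, while converting copies first snapshot the source range into a temporary buffer when the source and destination byte ranges overlap (e.g. two views over one ArrayBuffer); the int8-to-double conversion and names below are assumptions for illustration.]

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

void ConvertingCopy(const int8_t* src, size_t length, uint8_t* dst_bytes) {
  const uint8_t* src_bytes = reinterpret_cast<const uint8_t*>(src);
  size_t src_byte_length = length * sizeof(int8_t);
  size_t dst_byte_length = length * sizeof(double);

  std::vector<int8_t> temp;
  if (dst_bytes < src_bytes + src_byte_length &&
      src_bytes < dst_bytes + dst_byte_length) {
    // The ranges overlap (e.g. two views on one buffer): snapshot the source.
    temp.assign(src, src + length);
    src = temp.data();
  }
  for (size_t i = 0; i < length; i++) {
    double value = static_cast<double>(src[i]);
    std::memcpy(dst_bytes + i * sizeof(double), &value, sizeof(double));
  }
}

int main() {
  // Simulate two overlapping views over a single backing buffer.
  uint8_t buffer[64] = {1, 2, 3, 4};
  ConvertingCopy(reinterpret_cast<int8_t*>(buffer), 4, buffer);
  double last;
  std::memcpy(&last, buffer + 3 * sizeof(double), sizeof(double));
  std::cout << last << "\n";  // 4
}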
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 348af6d8ea..de5aa0d878 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -54,9 +54,10 @@ class ElementsAccessor {
// typed array elements.
virtual bool HasEntry(JSObject* holder, uint32_t entry) = 0;
+ // TODO(cbruni): HasEntry and Get should not be exposed publicly with the
+ // entry parameter.
virtual Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) = 0;
- virtual PropertyDetails GetDetails(JSObject* holder, uint32_t entry) = 0;
virtual bool HasAccessors(JSObject* holder) = 0;
virtual uint32_t NumberOfElements(JSObject* holder) = 0;
@@ -67,8 +68,6 @@ class ElementsAccessor {
// element that is non-deletable.
virtual void SetLength(Handle<JSArray> holder, uint32_t new_length) = 0;
- // Deletes an element in an object.
- virtual void Delete(Handle<JSObject> holder, uint32_t entry) = 0;
// If kCopyToEnd is specified as the copy_size to CopyElements, it copies all
// of elements from source after source_start to the destination array.
@@ -126,11 +125,6 @@ class ElementsAccessor {
virtual void Set(Handle<JSObject> holder, uint32_t entry, Object* value) = 0;
- virtual void Reconfigure(Handle<JSObject> object,
- Handle<FixedArrayBase> backing_store, uint32_t entry,
- Handle<Object> value,
- PropertyAttributes attributes) = 0;
-
virtual void Add(Handle<JSObject> object, uint32_t index,
Handle<Object> value, PropertyAttributes attributes,
uint32_t new_capacity) = 0;
@@ -214,6 +208,15 @@ class ElementsAccessor {
FixedArrayBase* backing_store,
uint32_t index) = 0;
+ virtual PropertyDetails GetDetails(JSObject* holder, uint32_t entry) = 0;
+ virtual void Reconfigure(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store, uint32_t entry,
+ Handle<Object> value,
+ PropertyAttributes attributes) = 0;
+
+ // Deletes an element in an object.
+ virtual void Delete(Handle<JSObject> holder, uint32_t entry) = 0;
+
// NOTE: this method violates the handlified function signature convention:
// raw pointer parameter |source_holder| in the function that allocates.
// This is done intentionally to avoid ArrayConcat() builtin performance
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index ee4bd55534..edd329f5da 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -55,7 +55,8 @@ namespace {
MUST_USE_RESULT MaybeHandle<Object> Invoke(
Isolate* isolate, bool is_construct, Handle<Object> target,
Handle<Object> receiver, int argc, Handle<Object> args[],
- Handle<Object> new_target, Execution::MessageHandling message_handling) {
+ Handle<Object> new_target, Execution::MessageHandling message_handling,
+ Execution::Target execution_target) {
DCHECK(!receiver->IsJSGlobalObject());
#ifdef USE_SIMULATOR
@@ -113,20 +114,30 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(
// Placeholder for return value.
Object* value = nullptr;
- typedef Object* (*JSEntryFunction)(Object* new_target, Object* target,
- Object* receiver, int argc,
- Object*** args);
-
- Handle<Code> code = is_construct
- ? isolate->factory()->js_construct_entry_code()
- : isolate->factory()->js_entry_code();
+ using JSEntryFunction =
+ GeneratedCode<Object*(Object * new_target, Object * target,
+ Object * receiver, int argc, Object*** args)>;
+
+ Handle<Code> code;
+ switch (execution_target) {
+ case Execution::Target::kCallable:
+ code = is_construct ? isolate->factory()->js_construct_entry_code()
+ : isolate->factory()->js_entry_code();
+ break;
+ case Execution::Target::kRunMicrotasks:
+ code = isolate->factory()->js_run_microtasks_entry_code();
+ break;
+ default:
+ UNREACHABLE();
+ }
{
// Save and restore context around invocation and block the
// allocation of handles without explicit handle scopes.
SaveContext save(isolate);
SealHandleScope shs(isolate);
- JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
+ JSEntryFunction stub_entry =
+ JSEntryFunction::FromAddress(isolate, code->entry());
if (FLAG_clear_exceptions_on_js_entry) isolate->clear_pending_exception();
@@ -138,9 +149,8 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(
if (FLAG_profile_deserialization && target->IsJSFunction()) {
PrintDeserializedCodeInfo(Handle<JSFunction>::cast(target));
}
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::JS_Execution);
- value = CALL_GENERATED_CODE(isolate, stub_entry, orig_func, func, recv,
- argc, argv);
+ RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kJS_Execution);
+ value = stub_entry.Call(orig_func, func, recv, argc, argv);
}
#ifdef VERIFY_HEAP
@@ -167,7 +177,8 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(
MaybeHandle<Object> CallInternal(Isolate* isolate, Handle<Object> callable,
Handle<Object> receiver, int argc,
Handle<Object> argv[],
- Execution::MessageHandling message_handling) {
+ Execution::MessageHandling message_handling,
+ Execution::Target target) {
// Convert calls on global objects to be calls on the global
// receiver instead to avoid having a 'this' pointer which refers
// directly to a global object.
@@ -176,7 +187,8 @@ MaybeHandle<Object> CallInternal(Isolate* isolate, Handle<Object> callable,
handle(Handle<JSGlobalObject>::cast(receiver)->global_proxy(), isolate);
}
return Invoke(isolate, false, callable, receiver, argc, argv,
- isolate->factory()->undefined_value(), message_handling);
+ isolate->factory()->undefined_value(), message_handling,
+ target);
}
} // namespace
@@ -186,7 +198,7 @@ MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
Handle<Object> receiver, int argc,
Handle<Object> argv[]) {
return CallInternal(isolate, callable, receiver, argc, argv,
- MessageHandling::kReport);
+ MessageHandling::kReport, Execution::Target::kCallable);
}
@@ -203,15 +215,13 @@ MaybeHandle<Object> Execution::New(Isolate* isolate, Handle<Object> constructor,
Handle<Object> argv[]) {
return Invoke(isolate, true, constructor,
isolate->factory()->undefined_value(), argc, argv, new_target,
- MessageHandling::kReport);
+ MessageHandling::kReport, Execution::Target::kCallable);
}
-MaybeHandle<Object> Execution::TryCall(Isolate* isolate,
- Handle<Object> callable,
- Handle<Object> receiver, int argc,
- Handle<Object> args[],
- MessageHandling message_handling,
- MaybeHandle<Object>* exception_out) {
+MaybeHandle<Object> Execution::TryCall(
+ Isolate* isolate, Handle<Object> callable, Handle<Object> receiver,
+ int argc, Handle<Object> args[], MessageHandling message_handling,
+ MaybeHandle<Object>* exception_out, Target target) {
bool is_termination = false;
MaybeHandle<Object> maybe_result;
if (exception_out != nullptr) *exception_out = MaybeHandle<Object>();
@@ -226,8 +236,8 @@ MaybeHandle<Object> Execution::TryCall(Isolate* isolate,
catcher.SetVerbose(false);
catcher.SetCaptureMessage(false);
- maybe_result =
- CallInternal(isolate, callable, receiver, argc, args, message_handling);
+ maybe_result = CallInternal(isolate, callable, receiver, argc, args,
+ message_handling, target);
if (maybe_result.is_null()) {
DCHECK(isolate->has_pending_exception());
@@ -253,6 +263,13 @@ MaybeHandle<Object> Execution::TryCall(Isolate* isolate,
return maybe_result;
}
+MaybeHandle<Object> Execution::RunMicrotasks(
+ Isolate* isolate, MessageHandling message_handling,
+ MaybeHandle<Object>* exception_out) {
+ auto undefined = isolate->factory()->undefined_value();
+ return TryCall(isolate, undefined, undefined, 0, {}, message_handling,
+ exception_out, Target::kRunMicrotasks);
+}
void StackGuard::SetStackLimit(uintptr_t limit) {
ExecutionAccess access(isolate_);
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index eeebfadde2..7dd920a446 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -8,6 +8,7 @@
#include "src/allocation.h"
#include "src/base/atomicops.h"
#include "src/globals.h"
+#include "src/objects/code.h"
#include "src/utils.h"
namespace v8 {
@@ -20,6 +21,7 @@ class Execution final : public AllStatic {
public:
// Whether to report pending messages, or keep them pending on the isolate.
enum class MessageHandling { kReport, kKeepPending };
+ enum class Target { kCallable, kRunMicrotasks };
// Call a function, the caller supplies a receiver and an array
// of arguments.
@@ -54,7 +56,12 @@ class Execution final : public AllStatic {
Handle<Object> receiver, int argc,
Handle<Object> argv[],
MessageHandling message_handling,
- MaybeHandle<Object>* exception_out);
+ MaybeHandle<Object>* exception_out,
+ Target target = Target::kCallable);
+ // Convenience method for performing RunMicrotasks
+ static MaybeHandle<Object> RunMicrotasks(Isolate* isolate,
+ MessageHandling message_handling,
+ MaybeHandle<Object>* exception_out);
};
@@ -162,8 +169,8 @@ class V8_EXPORT_PRIVATE StackGuard final {
void DisableInterrupts();
#if V8_TARGET_ARCH_64_BIT
- static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
- static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
+ static const uintptr_t kInterruptLimit = uintptr_t{0xfffffffffffffffe};
+ static const uintptr_t kIllegalLimit = uintptr_t{0xfffffffffffffff8};
#else
static const uintptr_t kInterruptLimit = 0xfffffffe;
static const uintptr_t kIllegalLimit = 0xfffffff8;
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index f0b5d72387..aa9d5c4364 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -86,24 +86,6 @@ void StatisticsExtension::GetCounters(
STATS_COUNTER_LIST_1(ADD_COUNTER) STATS_COUNTER_LIST_2(ADD_COUNTER)
#undef ADD_COUNTER
-#define ADD_COUNTER(name) \
- { counters->count_of_##name(), "count_of_" #name } \
- , {counters->size_of_##name(), "size_of_" #name},
-
- INSTANCE_TYPE_LIST(ADD_COUNTER)
-#undef ADD_COUNTER
-#define ADD_COUNTER(name) \
- { counters->count_of_CODE_TYPE_##name(), "count_of_CODE_TYPE_" #name } \
- , {counters->size_of_CODE_TYPE_##name(), "size_of_CODE_TYPE_" #name},
-
- CODE_KIND_LIST(ADD_COUNTER)
-#undef ADD_COUNTER
-#define ADD_COUNTER(name) \
- { counters->count_of_FIXED_ARRAY_##name(), "count_of_FIXED_ARRAY_" #name } \
- , {counters->size_of_FIXED_ARRAY_##name(), "size_of_FIXED_ARRAY_" #name},
-
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADD_COUNTER)
-#undef ADD_COUNTER
}; // End counter_list array.
for (size_t i = 0; i < arraysize(counter_list); i++) {
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/external-reference-table.cc
index 123f9c2fd2..52157b5034 100644
--- a/deps/v8/src/external-reference-table.cc
+++ b/deps/v8/src/external-reference-table.cc
@@ -92,6 +92,10 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"LDoubleConstant::one_half");
Add(ExternalReference::isolate_address(isolate).address(), "isolate");
Add(ExternalReference::builtins_address(isolate).address(), "builtins");
+ Add(ExternalReference::handle_scope_implementer_address(isolate).address(),
+ "Isolate::handle_scope_implementer_address");
+ Add(ExternalReference::pending_microtask_count_address(isolate).address(),
+ "Isolate::pending_microtask_count_address()");
Add(ExternalReference::interpreter_dispatch_table_address(isolate).address(),
"Interpreter::dispatch_table_address");
Add(ExternalReference::bytecode_size_table_address(isolate).address(),
@@ -341,6 +345,10 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"IncrementalMarking::RecordWrite");
Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
"StoreBuffer::StoreBufferOverflow");
+
+ Add(ExternalReference::invalidate_prototype_chains_function(isolate)
+ .address(),
+ "JSObject::InvalidatePrototypeChains()");
}
void ExternalReferenceTable::AddBuiltins(Isolate* isolate) {
diff --git a/deps/v8/src/factory-inl.h b/deps/v8/src/factory-inl.h
index 02cdef3a15..ace5c35472 100644
--- a/deps/v8/src/factory-inl.h
+++ b/deps/v8/src/factory-inl.h
@@ -29,6 +29,14 @@ ROOT_LIST(ROOT_ACCESSOR)
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR
+#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
+ Handle<Map> Factory::name##_map() { \
+ return Handle<Map>(bit_cast<Map**>( \
+ &isolate()->heap()->roots_[Heap::k##Name##Size##MapRootIndex])); \
+ }
+DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
+#undef DATA_HANDLER_MAP_ACCESSOR
+
#define STRING_ACCESSOR(name, str) \
Handle<String> Factory::name() { \
return Handle<String>(bit_cast<String**>( \
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index ea3936e232..fab539bf8b 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -337,14 +337,6 @@ Handle<AccessorPair> Factory::NewAccessorPair() {
}
-Handle<TypeFeedbackInfo> Factory::NewTypeFeedbackInfo() {
- Handle<TypeFeedbackInfo> info =
- Handle<TypeFeedbackInfo>::cast(NewStruct(TUPLE3_TYPE, TENURED));
- info->initialize_storage();
- return info;
-}
-
-
// Internalized strings are created in the old generation (data space).
Handle<String> Factory::InternalizeUtf8String(Vector<const char> string) {
Utf8StringKey key(string, isolate()->heap()->HashSeed());
@@ -1003,6 +995,7 @@ Handle<Context> Factory::NewNativeContext() {
context->set_math_random_index(Smi::kZero);
Handle<WeakCell> weak_cell = NewWeakCell(context);
context->set_self_weak_cell(*weak_cell);
+ context->set_serialized_objects(*empty_fixed_array());
DCHECK(context->IsNativeContext());
return context;
}
@@ -1184,7 +1177,7 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
script->set_type(Script::TYPE_NORMAL);
script->set_wrapper(heap->undefined_value());
script->set_line_ends(heap->undefined_value());
- script->set_eval_from_shared(heap->undefined_value());
+ script->set_eval_from_shared_or_wrapped_arguments(heap->undefined_value());
script->set_eval_from_position(0);
script->set_shared_function_infos(*empty_fixed_array(), SKIP_WRITE_BARRIER);
script->set_flags(0);
@@ -1881,7 +1874,7 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
// Create a new map for the global object.
Handle<Map> new_map = Map::CopyDropDescriptors(map);
new_map->set_may_have_interesting_symbols(true);
- new_map->set_dictionary_map(true);
+ new_map->set_is_dictionary_map(true);
// Set up the global object as a normalized object.
global->set_global_dictionary(*dictionary);
@@ -1985,6 +1978,18 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array,
array->set_length(Smi::FromInt(length));
}
+Handle<JSWeakMap> Factory::NewJSWeakMap() {
+ Context* native_context = isolate()->raw_native_context();
+ Handle<Map> map(native_context->js_weak_map_fun()->initial_map());
+ Handle<JSWeakMap> weakmap(JSWeakMap::cast(*NewJSObjectFromMap(map)));
+ {
+ // Do not leak handles for the hash table, it would make entries strong.
+ HandleScope scope(isolate());
+ JSWeakCollection::Initialize(weakmap, isolate());
+ }
+ return weakmap;
+}
+
Handle<JSModuleNamespace> Factory::NewJSModuleNamespace() {
Handle<Map> map = isolate()->js_module_namespace_map();
Handle<JSModuleNamespace> module_namespace(
@@ -2775,6 +2780,46 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> native_context,
return map;
}
+Handle<LoadHandler> Factory::NewLoadHandler(int data_count) {
+ Handle<Map> map;
+ switch (data_count) {
+ case 1:
+ map = load_handler1_map();
+ break;
+ case 2:
+ map = load_handler2_map();
+ break;
+ case 3:
+ map = load_handler3_map();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return New<LoadHandler>(map, OLD_SPACE);
+}
+
+Handle<StoreHandler> Factory::NewStoreHandler(int data_count) {
+ Handle<Map> map;
+ switch (data_count) {
+ case 0:
+ map = store_handler0_map();
+ break;
+ case 1:
+ map = store_handler1_map();
+ break;
+ case 2:
+ map = store_handler2_map();
+ break;
+ case 3:
+ map = store_handler3_map();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return New<StoreHandler>(map, OLD_SPACE);
+}
void Factory::SetRegExpAtomData(Handle<JSRegExp> regexp,
JSRegExp::Type type,
@@ -2866,7 +2911,7 @@ Handle<Map> Factory::CreateSloppyFunctionMap(
TERMINAL_FAST_ELEMENTS_KIND, inobject_properties_count);
map->set_has_prototype_slot(has_prototype);
map->set_is_constructor(has_prototype);
- map->set_is_callable();
+ map->set_is_callable(true);
Handle<JSFunction> empty_function;
if (maybe_empty_function.ToHandle(&empty_function)) {
Map::SetPrototype(map, empty_function);
@@ -2945,7 +2990,7 @@ Handle<Map> Factory::CreateStrictFunctionMap(
TERMINAL_FAST_ELEMENTS_KIND, inobject_properties_count);
map->set_has_prototype_slot(has_prototype);
map->set_is_constructor(has_prototype);
- map->set_is_callable();
+ map->set_is_callable(true);
Map::SetPrototype(map, empty_function);
//
@@ -3010,7 +3055,7 @@ Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
map->set_has_prototype_slot(true);
map->set_is_constructor(true);
map->set_is_prototype_map(true);
- map->set_is_callable();
+ map->set_is_callable(true);
Map::SetPrototype(map, empty_function);
//
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index cb76aab3b7..f0e9d63885 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -7,8 +7,10 @@
#include "src/feedback-vector.h"
#include "src/globals.h"
+#include "src/ic/handler-configuration.h"
#include "src/isolate.h"
#include "src/messages.h"
+#include "src/objects/data-handler.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/dictionary.h"
#include "src/objects/js-array.h"
@@ -29,7 +31,12 @@ class ConstantElementsPair;
class CoverageInfo;
class DebugInfo;
class FreshlyAllocatedBigInt;
+class JSMap;
+class JSMapIterator;
class JSModuleNamespace;
+class JSSet;
+class JSSetIterator;
+class JSWeakMap;
class NewFunctionArgs;
struct SourceRange;
class PreParsedScopeData;
@@ -164,9 +171,6 @@ class V8_EXPORT_PRIVATE Factory final {
// Create a pre-tenured empty AccessorPair.
Handle<AccessorPair> NewAccessorPair();
- // Create an empty TypeFeedbackInfo.
- Handle<TypeFeedbackInfo> NewTypeFeedbackInfo();
-
// Finds the internalized copy for string in the string table.
// If not found, a new string is added to the table and returned.
Handle<String> InternalizeUtf8String(Vector<const char> str);
@@ -552,6 +556,8 @@ class V8_EXPORT_PRIVATE Factory final {
int capacity,
ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
+ Handle<JSWeakMap> NewJSWeakMap();
+
Handle<JSGeneratorObject> NewJSGeneratorObject(Handle<JSFunction> function);
Handle<JSModuleNamespace> NewJSModuleNamespace();
@@ -734,6 +740,11 @@ class V8_EXPORT_PRIVATE Factory final {
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR
+#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
+ inline Handle<Map> name##_map();
+ DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
+#undef DATA_HANDLER_MAP_ACCESSOR
+
#define STRING_ACCESSOR(name, str) inline Handle<String> name();
INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR
@@ -805,6 +816,9 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<Map> ObjectLiteralMapFromCache(Handle<Context> native_context,
int number_of_properties);
+ Handle<LoadHandler> NewLoadHandler(int data_count);
+ Handle<StoreHandler> NewStoreHandler(int data_count);
+
Handle<RegExpMatchInfo> NewRegExpMatchInfo();
// Creates a new FixedArray that holds the data associated with the
diff --git a/deps/v8/src/fast-dtoa.cc b/deps/v8/src/fast-dtoa.cc
index 7c8438e62f..9572c7026d 100644
--- a/deps/v8/src/fast-dtoa.cc
+++ b/deps/v8/src/fast-dtoa.cc
@@ -316,7 +316,6 @@ static void BiggestPowerTen(uint32_t number,
}
}
-
// Generates the digits of input number w.
// w is a floating-point number (DiyFp), consisting of a significand and an
// exponent. Its exponent is bounded by kMinimalTargetExponent and
@@ -345,15 +344,15 @@ static void BiggestPowerTen(uint32_t number,
// then false is returned. This usually happens rarely (~0.5%).
//
// Say, for the sake of example, that
-// w.e() == -48, and w.f() == 0x1234567890abcdef
+// w.e() == -48, and w.f() == 0x1234567890ABCDEF
// w's value can be computed by w.f() * 2^w.e()
// We can obtain w's integral digits by simply shifting w.f() by -w.e().
// -> w's integral part is 0x1234
-// w's fractional part is therefore 0x567890abcdef.
+// w's fractional part is therefore 0x567890ABCDEF.
// Printing w's integral part is easy (simply print 0x1234 in decimal).
// In order to print its fraction we repeatedly multiply the fraction by 10 and
// get each digit. Example the first digit after the point would be computed by
-// (0x567890abcdef * 10) >> 48. -> 3
+// (0x567890ABCDEF * 10) >> 48. -> 3
// The whole thing becomes slightly more complicated because we want to stop
// once we have enough digits. That is, once the digits inside the buffer
// represent 'w' we can stop. Everything inside the interval low - high
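[Editor's note: a quick standalone check of the arithmetic in the comment above: with w.e() == -48, the integral part is w.f() >> 48 and the first fractional decimal digit is (fraction * 10) >> 48.]

#include <cstdint>
#include <iostream>

int main() {
  uint64_t f = 0x1234567890ABCDEFULL;
  uint64_t integral = f >> 48;                   // 0x1234
  uint64_t fraction = f & ((1ULL << 48) - 1);    // 0x567890ABCDEF
  uint64_t first_digit = (fraction * 10) >> 48;  // 3
  std::cout << std::hex << integral << " " << fraction << " " << std::dec
            << first_digit << "\n";              // 1234 567890abcdef 3
}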
diff --git a/deps/v8/src/feedback-vector-inl.h b/deps/v8/src/feedback-vector-inl.h
index e14381f2ab..888fa01854 100644
--- a/deps/v8/src/feedback-vector-inl.h
+++ b/deps/v8/src/feedback-vector-inl.h
@@ -117,7 +117,8 @@ bool FeedbackVector::has_optimized_code() const {
}
bool FeedbackVector::has_optimization_marker() const {
- return optimization_marker() != OptimizationMarker::kNone;
+ return optimization_marker() != OptimizationMarker::kLogFirstExecution &&
+ optimization_marker() != OptimizationMarker::kNone;
}
// Conversion from an integer index to either a slot or an ic slot.
@@ -171,9 +172,7 @@ BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback) {
case BinaryOperationFeedback::kString:
return BinaryOperationHint::kString;
case BinaryOperationFeedback::kBigInt:
- // TODO(jarin/jkummerow/neis): Support BigInts in TF.
- // Fall through for now.
- case BinaryOperationFeedback::kAny:
+ return BinaryOperationHint::kBigInt;
default:
return BinaryOperationHint::kAny;
}
@@ -197,6 +196,8 @@ CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) {
return CompareOperationHint::kString;
case CompareOperationFeedback::kSymbol:
return CompareOperationHint::kSymbol;
+ case CompareOperationFeedback::kBigInt:
+ return CompareOperationHint::kBigInt;
case CompareOperationFeedback::kReceiver:
return CompareOperationHint::kReceiver;
default:
diff --git a/deps/v8/src/feedback-vector.cc b/deps/v8/src/feedback-vector.cc
index 0572b85395..c3bdd82616 100644
--- a/deps/v8/src/feedback-vector.cc
+++ b/deps/v8/src/feedback-vector.cc
@@ -234,7 +234,9 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
DCHECK_EQ(vector->shared_function_info(), *shared);
DCHECK_EQ(vector->optimized_code_cell(),
- Smi::FromEnum(OptimizationMarker::kNone));
+ Smi::FromEnum(FLAG_log_function_events
+ ? OptimizationMarker::kLogFirstExecution
+ : OptimizationMarker::kNone));
DCHECK_EQ(vector->invocation_count(), 0);
DCHECK_EQ(vector->profiler_ticks(), 0);
DCHECK_EQ(vector->deopt_count(), 0);
@@ -253,6 +255,8 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
switch (kind) {
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kStoreGlobalSloppy:
+ case FeedbackSlotKind::kStoreGlobalStrict:
vector->set(index, isolate->heap()->empty_weak_cell(),
SKIP_WRITE_BARRIER);
break;
@@ -278,8 +282,6 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
case FeedbackSlotKind::kStoreNamedSloppy:
case FeedbackSlotKind::kStoreNamedStrict:
case FeedbackSlotKind::kStoreOwnNamed:
- case FeedbackSlotKind::kStoreGlobalSloppy:
- case FeedbackSlotKind::kStoreGlobalStrict:
case FeedbackSlotKind::kStoreKeyedSloppy:
case FeedbackSlotKind::kStoreKeyedStrict:
case FeedbackSlotKind::kStoreDataPropertyInLiteral:
@@ -341,12 +343,18 @@ void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
vector->set_optimized_code_cell(*cell);
}
-void FeedbackVector::SetOptimizationMarker(OptimizationMarker marker) {
- set_optimized_code_cell(Smi::FromEnum(marker));
+void FeedbackVector::ClearOptimizedCode() {
+ DCHECK(has_optimized_code());
+ SetOptimizationMarker(OptimizationMarker::kNone);
}
-void FeedbackVector::ClearOptimizedCode() {
- set_optimized_code_cell(Smi::FromEnum(OptimizationMarker::kNone));
+void FeedbackVector::ClearOptimizationMarker() {
+ DCHECK(!has_optimized_code());
+ SetOptimizationMarker(OptimizationMarker::kNone);
+}
+
+void FeedbackVector::SetOptimizationMarker(OptimizationMarker marker) {
+ set_optimized_code_cell(Smi::FromEnum(marker));
}
void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
@@ -356,7 +364,7 @@ void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
WeakCell* cell = WeakCell::cast(slot);
if (cell->cleared()) {
- ClearOptimizedCode();
+ ClearOptimizationMarker();
return;
}
@@ -424,10 +432,17 @@ bool FeedbackVector::ClearSlots(Isolate* isolate) {
}
case FeedbackSlotKind::kStoreNamedSloppy:
case FeedbackSlotKind::kStoreNamedStrict:
- case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kStoreOwnNamed: {
+ StoreICNexus nexus(this, slot);
+ if (!nexus.IsCleared()) {
+ nexus.Clear();
+ feedback_updated = true;
+ }
+ break;
+ }
case FeedbackSlotKind::kStoreGlobalSloppy:
case FeedbackSlotKind::kStoreGlobalStrict: {
- StoreICNexus nexus(this, slot);
+ StoreGlobalICNexus nexus(this, slot);
if (!nexus.IsCleared()) {
nexus.Clear();
feedback_updated = true;
@@ -564,18 +579,6 @@ InlineCacheState LoadICNexus::StateFromFeedback() const {
return UNINITIALIZED;
}
-InlineCacheState LoadGlobalICNexus::StateFromFeedback() const {
- Isolate* isolate = GetIsolate();
- Object* feedback = GetFeedback();
-
- Object* extra = GetFeedbackExtra();
- if (!WeakCell::cast(feedback)->cleared() ||
- extra != *FeedbackVector::UninitializedSentinel(isolate)) {
- return MONOMORPHIC;
- }
- return UNINITIALIZED;
-}
-
InlineCacheState KeyedLoadICNexus::StateFromFeedback() const {
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
@@ -602,6 +605,56 @@ InlineCacheState KeyedLoadICNexus::StateFromFeedback() const {
return UNINITIALIZED;
}
+void GlobalICNexus::ConfigureUninitialized() {
+ Isolate* isolate = GetIsolate();
+ SetFeedback(isolate->heap()->empty_weak_cell(), SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+}
+
+void GlobalICNexus::ConfigurePropertyCellMode(Handle<PropertyCell> cell) {
+ Isolate* isolate = GetIsolate();
+ SetFeedback(*isolate->factory()->NewWeakCell(cell));
+ SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+}
+
+bool GlobalICNexus::ConfigureLexicalVarMode(int script_context_index,
+ int context_slot_index) {
+ DCHECK_LE(0, script_context_index);
+ DCHECK_LE(0, context_slot_index);
+ if (!ContextIndexBits::is_valid(script_context_index) ||
+ !SlotIndexBits::is_valid(context_slot_index)) {
+ return false;
+ }
+ int config = ContextIndexBits::encode(script_context_index) |
+ SlotIndexBits::encode(context_slot_index);
+
+ SetFeedback(Smi::FromInt(config));
+ Isolate* isolate = GetIsolate();
+ SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ return true;
+}
+
+void GlobalICNexus::ConfigureHandlerMode(Handle<Object> handler) {
+ SetFeedback(GetIsolate()->heap()->empty_weak_cell());
+ SetFeedbackExtra(*handler);
+}
+
+InlineCacheState GlobalICNexus::StateFromFeedback() const {
+ Isolate* isolate = GetIsolate();
+ Object* feedback = GetFeedback();
+ if (feedback->IsSmi()) return MONOMORPHIC;
+
+ Object* extra = GetFeedbackExtra();
+ if (!WeakCell::cast(feedback)->cleared() ||
+ extra != *FeedbackVector::UninitializedSentinel(isolate)) {
+ return MONOMORPHIC;
+ }
+ return UNINITIALIZED;
+}
+
InlineCacheState StoreICNexus::StateFromFeedback() const {
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
@@ -667,16 +720,31 @@ InlineCacheState CallICNexus::StateFromFeedback() const {
return UNINITIALIZED;
}
-int CallICNexus::ExtractCallCount() {
+int CallICNexus::GetCallCount() {
Object* call_count = GetFeedbackExtra();
CHECK(call_count->IsSmi());
- int value = Smi::ToInt(call_count);
- return value;
+ uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
+ return CallCountField::decode(value);
}
+void CallICNexus::SetSpeculationMode(SpeculationMode mode) {
+ Object* call_count = GetFeedbackExtra();
+ CHECK(call_count->IsSmi());
+ uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
+ int result = static_cast<int>(CallCountField::decode(value) |
+ SpeculationModeField::encode(mode));
+ SetFeedbackExtra(Smi::FromInt(result), SKIP_WRITE_BARRIER);
+}
+
+SpeculationMode CallICNexus::GetSpeculationMode() {
+ Object* call_count = GetFeedbackExtra();
+ CHECK(call_count->IsSmi());
+ uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
+ return SpeculationModeField::decode(value);
+}
float CallICNexus::ComputeCallFrequency() {
double const invocation_count = vector()->invocation_count();
- double const call_count = ExtractCallCount();
+ double const call_count = GetCallCount();
if (invocation_count == 0) {
// Prevent division by 0.
return 0.0f;
@@ -691,25 +759,6 @@ void CallICNexus::ConfigureUninitialized() {
SetFeedbackExtra(Smi::kZero, SKIP_WRITE_BARRIER);
}
-void LoadGlobalICNexus::ConfigureUninitialized() {
- Isolate* isolate = GetIsolate();
- SetFeedback(isolate->heap()->empty_weak_cell(), SKIP_WRITE_BARRIER);
- SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
-}
-
-void LoadGlobalICNexus::ConfigurePropertyCellMode(Handle<PropertyCell> cell) {
- Isolate* isolate = GetIsolate();
- SetFeedback(*isolate->factory()->NewWeakCell(cell));
- SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
-}
-
-void LoadGlobalICNexus::ConfigureHandlerMode(Handle<Object> handler) {
- SetFeedback(GetIsolate()->heap()->empty_weak_cell());
- SetFeedbackExtra(*handler);
-}
-
void FeedbackNexus::ConfigureMonomorphic(Handle<Name> name,
Handle<Map> receiver_map,
Handle<Object> handler) {
@@ -896,14 +945,10 @@ KeyedAccessStoreMode KeyedStoreICNexus::GetKeyedAccessStoreMode() const {
for (const Handle<Object>& maybe_code_handler : handlers) {
// The first handler that isn't the slow handler will have the bits we need.
Handle<Code> handler;
- if (maybe_code_handler->IsTuple3()) {
- // Elements transition.
- Handle<Tuple3> data_handler = Handle<Tuple3>::cast(maybe_code_handler);
- handler = handle(Code::cast(data_handler->value2()));
- } else if (maybe_code_handler->IsTuple2()) {
- // Element store with prototype chain check.
- Handle<Tuple2> data_handler = Handle<Tuple2>::cast(maybe_code_handler);
- handler = handle(Code::cast(data_handler->value2()));
+ if (maybe_code_handler->IsStoreHandler()) {
+ Handle<StoreHandler> data_handler =
+ Handle<StoreHandler>::cast(maybe_code_handler);
+ handler = handle(Code::cast(data_handler->smi_handler()));
} else if (maybe_code_handler->IsSmi()) {
// Skip proxy handlers.
DCHECK_EQ(*maybe_code_handler, *StoreHandler::StoreProxy(GetIsolate()));
diff --git a/deps/v8/src/feedback-vector.h b/deps/v8/src/feedback-vector.h
index fdcf9ff01a..9f8096d138 100644
--- a/deps/v8/src/feedback-vector.h
+++ b/deps/v8/src/feedback-vector.h
@@ -10,6 +10,7 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/elements-kind.h"
+#include "src/globals.h"
#include "src/objects/map.h"
#include "src/objects/name.h"
#include "src/objects/object-macros.h"
@@ -89,6 +90,10 @@ inline bool IsKeyedStoreICKind(FeedbackSlotKind kind) {
kind == FeedbackSlotKind::kStoreKeyedStrict;
}
+inline bool IsGlobalICKind(FeedbackSlotKind kind) {
+ return IsLoadGlobalICKind(kind) || IsStoreGlobalICKind(kind);
+}
+
inline bool IsTypeProfileKind(FeedbackSlotKind kind) {
return kind == FeedbackSlotKind::kTypeProfile;
}
@@ -174,6 +179,9 @@ class FeedbackVector : public HeapObject {
Handle<Code> code);
void SetOptimizationMarker(OptimizationMarker marker);
+ // Clears the optimization marker in the feedback vector.
+ void ClearOptimizationMarker();
+
// Conversion from a slot to an integer index to the underlying array.
static int GetIndex(FeedbackSlot slot) { return slot.ToInt(); }
@@ -204,6 +212,7 @@ class FeedbackVector : public HeapObject {
bool Name(FeedbackSlot slot) const { return Name##Kind(GetKind(slot)); }
DEFINE_SLOT_KIND_PREDICATE(IsCallIC)
+ DEFINE_SLOT_KIND_PREDICATE(IsGlobalIC)
DEFINE_SLOT_KIND_PREDICATE(IsLoadIC)
DEFINE_SLOT_KIND_PREDICATE(IsLoadGlobalIC)
DEFINE_SLOT_KIND_PREDICATE(IsKeyedLoadIC)
@@ -644,11 +653,16 @@ class CallICNexus final : public FeedbackNexus {
return length == 0;
}
- int ExtractCallCount();
+ int GetCallCount();
+ void SetSpeculationMode(SpeculationMode mode);
+ SpeculationMode GetSpeculationMode();
// Compute the call frequency based on the call count and the invocation
// count (taken from the type feedback vector).
float ComputeCallFrequency();
+
+ typedef BitField<SpeculationMode, 0, 1> SpeculationModeField;
+ typedef BitField<uint32_t, 1, 31> CallCountField;
};
class LoadICNexus : public FeedbackNexus {
@@ -667,35 +681,6 @@ class LoadICNexus : public FeedbackNexus {
InlineCacheState StateFromFeedback() const override;
};
-class LoadGlobalICNexus : public FeedbackNexus {
- public:
- LoadGlobalICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsLoadGlobalIC(slot));
- }
- LoadGlobalICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsLoadGlobalIC(slot));
- }
-
- int ExtractMaps(MapHandles* maps) const final {
- // LoadGlobalICs don't record map feedback.
- return 0;
- }
- MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
- return MaybeHandle<Code>();
- }
- bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
- return length == 0;
- }
-
- void ConfigureUninitialized() override;
- void ConfigurePropertyCellMode(Handle<PropertyCell> cell);
- void ConfigureHandlerMode(Handle<Object> handler);
-
- InlineCacheState StateFromFeedback() const override;
-};
-
class KeyedLoadICNexus : public FeedbackNexus {
public:
KeyedLoadICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
@@ -719,13 +704,11 @@ class StoreICNexus : public FeedbackNexus {
public:
StoreICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK(vector->IsStoreIC(slot) || vector->IsStoreOwnIC(slot) ||
- vector->IsStoreGlobalIC(slot));
+ DCHECK(vector->IsStoreIC(slot) || vector->IsStoreOwnIC(slot));
}
StoreICNexus(FeedbackVector* vector, FeedbackSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK(vector->IsStoreIC(slot) || vector->IsStoreOwnIC(slot) ||
- vector->IsStoreGlobalIC(slot));
+ DCHECK(vector->IsStoreIC(slot) || vector->IsStoreOwnIC(slot));
}
void Clear() override { ConfigurePremonomorphic(); }
@@ -733,6 +716,74 @@ class StoreICNexus : public FeedbackNexus {
InlineCacheState StateFromFeedback() const override;
};
+// Base class for LoadGlobalICNexus and StoreGlobalICNexus.
+class GlobalICNexus : public FeedbackNexus {
+ public:
+ GlobalICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK(vector->IsGlobalIC(slot));
+ }
+ GlobalICNexus(FeedbackVector* vector, FeedbackSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK(vector->IsGlobalIC(slot));
+ }
+
+ int ExtractMaps(MapHandles* maps) const final {
+ // Load/StoreGlobalICs don't record map feedback.
+ return 0;
+ }
+ MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
+ return MaybeHandle<Code>();
+ }
+ bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
+ return length == 0;
+ }
+
+ void ConfigureUninitialized() override;
+ void ConfigurePropertyCellMode(Handle<PropertyCell> cell);
+ // Returns false if given combination of indices is not allowed.
+ bool ConfigureLexicalVarMode(int script_context_index,
+ int context_slot_index);
+ void ConfigureHandlerMode(Handle<Object> handler);
+
+ InlineCacheState StateFromFeedback() const override;
+
+// Bit positions in a smi that encodes lexical environment variable access.
+#define LEXICAL_MODE_BIT_FIELDS(V, _) \
+ V(ContextIndexBits, unsigned, 12, _) \
+ V(SlotIndexBits, unsigned, 19, _)
+
+ DEFINE_BIT_FIELDS(LEXICAL_MODE_BIT_FIELDS)
+#undef LEXICAL_MODE_BIT_FIELDS
+
+ // Make sure we don't overflow the smi.
+ STATIC_ASSERT(LEXICAL_MODE_BIT_FIELDS_Ranges::kBitsCount <= kSmiValueSize);
+};
+
+class LoadGlobalICNexus : public GlobalICNexus {
+ public:
+ LoadGlobalICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
+ : GlobalICNexus(vector, slot) {
+ DCHECK(vector->IsLoadGlobalIC(slot));
+ }
+ LoadGlobalICNexus(FeedbackVector* vector, FeedbackSlot slot)
+ : GlobalICNexus(vector, slot) {
+ DCHECK(vector->IsLoadGlobalIC(slot));
+ }
+};
+
+class StoreGlobalICNexus : public GlobalICNexus {
+ public:
+ StoreGlobalICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
+ : GlobalICNexus(vector, slot) {
+ DCHECK(vector->IsStoreGlobalIC(slot));
+ }
+ StoreGlobalICNexus(FeedbackVector* vector, FeedbackSlot slot)
+ : GlobalICNexus(vector, slot) {
+ DCHECK(vector->IsStoreGlobalIC(slot));
+ }
+};
+
// TODO(ishell): Currently we use StoreOwnIC only for storing properties that
// already exist in the boilerplate therefore we can use StoreIC.
typedef StoreICNexus StoreOwnICNexus;
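[Editor's note: a standalone sketch of the packing CallICNexus now uses for its feedback-extra Smi, mirroring the SpeculationModeField (1 bit) and CallCountField (31 bits) typedefs above with plain shifts; the enum values and helper names are assumptions for illustration.]

#include <cassert>
#include <cstdint>

enum class SpeculationMode : uint32_t {
  kAllowSpeculation = 0,
  kDisallowSpeculation = 1
};

constexpr uint32_t kModeBits = 1;
constexpr uint32_t kModeMask = (1u << kModeBits) - 1;

// Mode lives in bit 0, the call count in the upper 31 bits.
uint32_t Encode(SpeculationMode mode, uint32_t call_count) {
  return static_cast<uint32_t>(mode) | (call_count << kModeBits);
}

SpeculationMode DecodeMode(uint32_t packed) {
  return static_cast<SpeculationMode>(packed & kModeMask);
}

uint32_t DecodeCallCount(uint32_t packed) { return packed >> kModeBits; }

int main() {
  uint32_t packed = Encode(SpeculationMode::kDisallowSpeculation, 42);
  assert(DecodeMode(packed) == SpeculationMode::kDisallowSpeculation);
  assert(DecodeCallCount(packed) == 42);
}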
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 92635703bb..e40e182dad 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -26,7 +26,7 @@
#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
V8_EXPORT_PRIVATE extern ctype FLAG_##nam;
#define FLAG_READONLY(ftype, ctype, nam, def, cmt) \
- static ctype const FLAG_##nam = def;
+ static constexpr ctype FLAG_##nam = def;
// We want to supply the actual storage and value for the flag variable in the
// .cc file. We only do this for writable flags.
@@ -44,7 +44,7 @@
// for MODE_META, so there is no impact on the flags interface.
#elif defined(FLAG_MODE_DEFINE_DEFAULTS)
#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
- static ctype const FLAGDEFAULT_##nam = def;
+ static constexpr ctype FLAGDEFAULT_##nam = def;
// We want to write entries into our meta data table, for internal parsing and
// printing / etc in the flag parser code. We only do this for writable flags.
@@ -161,7 +161,6 @@ struct MaybeBoolFlag {
#define DEFINE_INT(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
#define DEFINE_UINT(nam, def, cmt) FLAG(UINT, unsigned int, nam, def, cmt)
#define DEFINE_FLOAT(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
-#define DEFINE_SIZE_T(nam, def, cmt) FLAG(SIZE_T, size_t, nam, def, cmt)
#define DEFINE_STRING(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt)
#define DEFINE_ARGS(nam, cmt) \
FLAG(ARGS, JSArguments, nam, {0 COMMA nullptr}, cmt)
@@ -169,11 +168,16 @@ struct MaybeBoolFlag {
#define DEFINE_ALIAS_BOOL(alias, nam) FLAG_ALIAS(BOOL, bool, alias, nam)
#define DEFINE_ALIAS_INT(alias, nam) FLAG_ALIAS(INT, int, alias, nam)
#define DEFINE_ALIAS_FLOAT(alias, nam) FLAG_ALIAS(FLOAT, double, alias, nam)
-#define DEFINE_ALIAS_SIZE_T(alias, nam) FLAG_ALIAS(SIZE_T, size_t, alias, nam)
#define DEFINE_ALIAS_STRING(alias, nam) \
FLAG_ALIAS(STRING, const char*, alias, nam)
#define DEFINE_ALIAS_ARGS(alias, nam) FLAG_ALIAS(ARGS, JSArguments, alias, nam)
+#ifdef DEBUG
+#define DEFINE_DEBUG_BOOL DEFINE_BOOL
+#else
+#define DEFINE_DEBUG_BOOL DEFINE_BOOL_READONLY
+#endif
+
//
// Flags in all modes.
//
@@ -192,7 +196,10 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
DEFINE_IMPLICATION(es_staging, harmony)
// Enabling import.meta requires to also enable import()
DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
+
DEFINE_IMPLICATION(harmony_class_fields, harmony_public_fields)
+DEFINE_IMPLICATION(harmony_class_fields, harmony_static_fields)
+DEFINE_IMPLICATION(harmony_class_fields, harmony_private_fields)
// Features that are still work in progress (behind individual flags).
#define HARMONY_INPROGRESS(V) \
@@ -201,8 +208,9 @@ DEFINE_IMPLICATION(harmony_class_fields, harmony_public_fields)
V(harmony_function_sent, "harmony function.sent") \
V(harmony_do_expressions, "harmony do-expressions") \
V(harmony_class_fields, "harmony fields in class literals") \
- V(harmony_public_fields, "harmony public fields in class literals") \
- V(harmony_bigint, "harmony arbitrary precision integers")
+ V(harmony_static_fields, "harmony static fields in class literals") \
+ V(harmony_bigint, "harmony arbitrary precision integers") \
+ V(harmony_private_fields, "harmony private fields in class literals")
// Features that are complete (but still behind --harmony/es-staging flag).
#define HARMONY_STAGED(V) \
@@ -210,14 +218,15 @@ DEFINE_IMPLICATION(harmony_class_fields, harmony_public_fields)
V(harmony_restrict_constructor_return, \
"harmony disallow non undefined primitive return value from class " \
"constructor") \
- V(harmony_dynamic_import, "harmony dynamic import")
+ V(harmony_dynamic_import, "harmony dynamic import") \
+ V(harmony_public_fields, "harmony public fields in class literals") \
+ V(harmony_optional_catch_binding, "allow omitting binding in catch blocks")
// Features that are shipping (turned on by default, but internal flag remains).
#define HARMONY_SHIPPING_BASE(V) \
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
V(harmony_regexp_named_captures, "harmony regexp named captures") \
V(harmony_regexp_property, "harmony Unicode regexp property classes") \
- V(harmony_async_iteration, "harmony async iteration") \
V(harmony_promise_finally, "harmony Promise.prototype.finally")
#ifdef V8_INTL_SUPPORT
@@ -267,7 +276,7 @@ DEFINE_BOOL(future, FUTURE_BOOL,
"Implies all staged features that we want to ship in the "
"not-too-far future")
-DEFINE_IMPLICATION(future, preparser_scope_analysis)
+DEFINE_IMPLICATION(future, background_compile)
DEFINE_IMPLICATION(future, write_protect_code_memory)
// Flags for experimental implementation features.
@@ -449,7 +458,6 @@ DEFINE_BOOL(turbo_jt, true, "enable jump threading in TurboFan")
DEFINE_BOOL(turbo_loop_peeling, true, "Turbofan loop peeling")
DEFINE_BOOL(turbo_loop_variable, true, "Turbofan loop variable optimization")
DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
-DEFINE_BOOL(turbo_frame_elision, true, "elide frames in TurboFan")
DEFINE_BOOL(turbo_escape, true, "enable escape analysis")
DEFINE_BOOL(turbo_instruction_scheduling, false,
"enable instruction scheduling in TurboFan")
@@ -458,12 +466,8 @@ DEFINE_BOOL(turbo_stress_instruction_scheduling, false,
DEFINE_BOOL(turbo_store_elimination, true,
"enable store-store elimination in TurboFan")
DEFINE_BOOL(trace_store_elimination, false, "trace store elimination")
-DEFINE_BOOL(turbo_experimental, false,
- "enable crashing features, for testing purposes only")
DEFINE_BOOL(turbo_rewrite_far_jumps, true,
"rewrite far to near jumps (ia32,x64)")
-// TODO(rmcilroy): Remove extra_masking once the finch experiment is removed.
-DEFINE_BOOL(extra_masking, false, "obsolete - has no effect")
#ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS
#define V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS false
@@ -489,14 +493,18 @@ DEFINE_BOOL(wasm_disable_structured_cloning, false,
"disable wasm structured cloning")
DEFINE_INT(wasm_num_compilation_tasks, 10,
"number of parallel compilation tasks for wasm")
-DEFINE_BOOL(wasm_trace_native_heap, false, "trace wasm native heap events")
-DEFINE_BOOL(wasm_jit_to_native, false,
+DEFINE_DEBUG_BOOL(wasm_trace_native_heap, false,
+ "trace wasm native heap events")
+DEFINE_BOOL(wasm_jit_to_native, true,
"JIT wasm code to native (not JS GC) memory")
+DEFINE_BOOL(wasm_write_protect_code_memory, false,
+ "write protect code memory on the wasm native heap")
+DEFINE_IMPLICATION(future, wasm_jit_to_native)
DEFINE_BOOL(wasm_trace_serialization, false,
"trace serialization/deserialization")
DEFINE_BOOL(wasm_async_compilation, true,
"enable actual asynchronous compilation for WebAssembly.compile")
-DEFINE_BOOL(wasm_stream_compilation, false,
+DEFINE_BOOL(wasm_stream_compilation, true,
"enable streaming compilation for WebAssembly")
DEFINE_IMPLICATION(wasm_stream_compilation, wasm_async_compilation)
DEFINE_BOOL(wasm_test_streaming, false,
@@ -508,21 +516,24 @@ DEFINE_UINT(wasm_max_mem_pages, v8::internal::wasm::kV8MaxWasmMemoryPages,
"maximum memory size of a wasm instance")
DEFINE_UINT(wasm_max_table_size, v8::internal::wasm::kV8MaxWasmTableSize,
"maximum table size of a wasm instance")
-DEFINE_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
-DEFINE_BOOL(trace_wasm_decode_time, false, "trace decoding time of wasm code")
-DEFINE_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
-DEFINE_BOOL(trace_wasm_interpreter, false, "trace interpretation of wasm code")
-DEFINE_BOOL(trace_wasm_streaming, false,
- "trace streaming compilation of wasm code")
+DEFINE_DEBUG_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
+DEFINE_DEBUG_BOOL(trace_wasm_decode_time, false,
+ "trace decoding time of wasm code")
+DEFINE_DEBUG_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
+DEFINE_DEBUG_BOOL(trace_wasm_interpreter, false,
+ "trace interpretation of wasm code")
+DEFINE_DEBUG_BOOL(trace_wasm_streaming, false,
+ "trace streaming compilation of wasm code")
DEFINE_INT(trace_wasm_ast_start, 0,
"start function for wasm AST trace (inclusive)")
DEFINE_INT(trace_wasm_ast_end, 0, "end function for wasm AST trace (exclusive)")
DEFINE_BOOL(liftoff, false,
"enable liftoff, the experimental wasm baseline compiler")
-DEFINE_BOOL(trace_liftoff, false, "trace liftoff, the wasm baseline compiler")
+DEFINE_DEBUG_BOOL(trace_liftoff, false,
+ "trace liftoff, the wasm baseline compiler")
DEFINE_UINT(skip_compiling_wasm_funcs, 0, "start compiling at function N")
-DEFINE_BOOL(wasm_break_on_decoder_error, false,
- "debug break when wasm decoder encounters an error")
+DEFINE_DEBUG_BOOL(wasm_break_on_decoder_error, false,
+ "debug break when wasm decoder encounters an error")
DEFINE_BOOL(wasm_trace_memory, false,
"print all memory updates performed in wasm code")
@@ -535,7 +546,7 @@ DEFINE_BOOL(trace_asm_scanner, false,
DEFINE_BOOL(trace_asm_parser, false, "verbose logging of asm.js parse failures")
DEFINE_BOOL(stress_validate_asm, false, "try to validate everything as asm.js")
-DEFINE_BOOL(dump_wasm_module, false, "dump wasm module bytes")
+DEFINE_DEBUG_BOOL(dump_wasm_module, false, "dump wasm module bytes")
DEFINE_STRING(dump_wasm_module_path, nullptr,
"directory to dump wasm modules to")
@@ -547,6 +558,8 @@ DEFINE_BOOL(experimental_wasm_mv, false,
"enable prototype multi-value support for wasm")
DEFINE_BOOL(experimental_wasm_threads, false,
"enable prototype threads for wasm")
+DEFINE_BOOL(experimental_wasm_sat_f2i_conversions, false,
+ "enable non-trapping float-to-int conversions for wasm")
DEFINE_BOOL(wasm_opt, false, "enable wasm optimization")
DEFINE_BOOL(wasm_no_bounds_checks, false,
@@ -557,8 +570,9 @@ DEFINE_BOOL(wasm_no_stack_checks, false,
DEFINE_BOOL(wasm_trap_handler, false,
"use signal handlers to catch out of bounds memory access in wasm"
" (experimental, currently Linux x86_64 only)")
-DEFINE_BOOL(wasm_code_fuzzer_gen_test, false,
- "Generate a test case when running the wasm-code fuzzer")
+DEFINE_BOOL(wasm_fuzzer_gen_test, false,
+ "Generate a test case when running a wasm fuzzer")
+DEFINE_IMPLICATION(wasm_fuzzer_gen_test, single_threaded)
DEFINE_BOOL(print_wasm_code, false, "Print WebAssembly code")
DEFINE_BOOL(wasm_interpret_all, false,
"Execute all wasm code in the wasm interpreter")
@@ -567,8 +581,8 @@ DEFINE_BOOL(asm_wasm_lazy_compilation, false,
DEFINE_IMPLICATION(validate_asm, asm_wasm_lazy_compilation)
DEFINE_BOOL(wasm_lazy_compilation, false,
"enable lazy compilation for all wasm modules")
-DEFINE_BOOL(trace_wasm_lazy_compilation, false,
- "trace lazy compilation of wasm functions")
+DEFINE_DEBUG_BOOL(trace_wasm_lazy_compilation, false,
+ "trace lazy compilation of wasm functions")
// wasm-interpret-all resets {asm-,}wasm-lazy-compilation.
DEFINE_NEG_IMPLICATION(wasm_interpret_all, asm_wasm_lazy_compilation)
DEFINE_NEG_IMPLICATION(wasm_interpret_all, wasm_lazy_compilation)
@@ -577,24 +591,27 @@ DEFINE_NEG_IMPLICATION(wasm_interpret_all, wasm_lazy_compilation)
DEFINE_INT(frame_count, 1, "number of stack frames inspected by the profiler")
DEFINE_INT(type_info_threshold, 25,
"percentage of ICs that must have type info to allow optimization")
-DEFINE_INT(generic_ic_threshold, 30,
- "max percentage of megamorphic/generic ICs to allow optimization")
-DEFINE_INT(self_opt_count, 130, "call count before self-optimization")
+
+DEFINE_INT(stress_sampling_allocation_profiler, 0,
+ "Enables sampling allocation profiler with X as a sample interval")
// Garbage collections flags.
-DEFINE_SIZE_T(min_semi_space_size, 0,
- "min size of a semi-space (in MBytes), the new space consists of "
- "two semi-spaces")
-DEFINE_SIZE_T(max_semi_space_size, 0,
- "max size of a semi-space (in MBytes), the new space consists of "
- "two semi-spaces")
+DEFINE_INT(min_semi_space_size, 0,
+ "min size of a semi-space (in MBytes), the new space consists of two"
+ "semi-spaces")
+DEFINE_INT(max_semi_space_size, 0,
+ "max size of a semi-space (in MBytes), the new space consists of two"
+ "semi-spaces")
DEFINE_INT(semi_space_growth_factor, 2, "factor by which to grow the new space")
DEFINE_BOOL(experimental_new_space_growth_heuristic, false,
"Grow the new space based on the percentage of survivors instead "
"of their absolute value.")
-DEFINE_SIZE_T(max_old_space_size, 0, "max size of the old space (in Mbytes)")
-DEFINE_SIZE_T(initial_old_space_size, 0, "initial old space size (in Mbytes)")
+DEFINE_INT(max_old_space_size, 0, "max size of the old space (in Mbytes)")
+DEFINE_INT(initial_old_space_size, 0, "initial old space size (in Mbytes)")
DEFINE_BOOL(gc_global, false, "always perform global GCs")
+DEFINE_INT(random_gc_interval, 0,
+ "Collect garbage after random(0, X) allocations. It overrides "
+ "gc_interval.")
DEFINE_INT(gc_interval, -1, "garbage collect after <n> allocations")
DEFINE_INT(retain_maps_for_n_gc, 2,
"keeps maps alive for <n> old space garbage collections")
@@ -649,10 +666,13 @@ DEFINE_BOOL(parallel_pointer_update, true,
"use parallel pointer update during compaction")
DEFINE_BOOL(trace_incremental_marking, false,
"trace progress of the incremental marking")
+DEFINE_BOOL(trace_stress_marking, false, "trace stress marking progress")
+DEFINE_BOOL(trace_stress_scavenge, false, "trace stress scavenge progress")
DEFINE_BOOL(track_gc_object_stats, false,
"track object counts and memory usage")
DEFINE_BOOL(trace_gc_object_stats, false,
"trace object counts and memory usage")
+DEFINE_BOOL(trace_zone_stats, false, "trace zone memory usage")
DEFINE_BOOL(track_retaining_path, false,
"enable support for tracking retaining path")
DEFINE_BOOL(concurrent_array_buffer_freeing, true,
@@ -698,9 +718,19 @@ DEFINE_BOOL(stress_compaction_random, false,
"evacuation candidates. It overrides stress_compaction.")
DEFINE_BOOL(stress_incremental_marking, false,
"force incremental marking for small heaps and run it more often")
+
+DEFINE_BOOL(fuzzer_gc_analysis, false,
+ "prints number of allocations and enables analysis mode for gc "
+ "fuzz testing, e.g. --stress-marking, --stress-scavenge")
DEFINE_INT(stress_marking, 0,
"force marking at random points between 0 and X (inclusive) percent "
"of the regular marking start limit")
+DEFINE_INT(stress_scavenge, 0,
+ "force scavenge at random points between 0 and X (inclusive) "
+ "percent of the new space capacity")
+DEFINE_IMPLICATION(fuzzer_gc_analysis, stress_marking)
+DEFINE_IMPLICATION(fuzzer_gc_analysis, stress_scavenge)
+
DEFINE_BOOL(manual_evacuation_candidates_selection, false,
"Test mode only flag. It allows an unit test to select evacuation "
"candidates pages (requires --stress_compaction).")
@@ -780,8 +810,6 @@ DEFINE_BOOL(force_slow_path, false, "always take the slow path for builtins")
DEFINE_BOOL(inline_new, true, "use fast inline allocation")
// codegen-ia32.cc / codegen-arm.cc
-DEFINE_BOOL(trace_codegen, false,
- "print name of functions for which code is generated")
DEFINE_BOOL(trace, false, "trace function calls")
// codegen.cc
@@ -897,12 +925,11 @@ DEFINE_BOOL(trace_prototype_users, false,
DEFINE_BOOL(use_verbose_printer, true, "allows verbose printing")
DEFINE_BOOL(trace_for_in_enumerate, false, "Trace for-in enumerate slow-paths")
DEFINE_BOOL(trace_maps, false, "trace map creation")
+DEFINE_BOOL(trace_maps_details, true, "also log map details")
DEFINE_IMPLICATION(trace_maps, log_code)
// parser.cc
DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
-DEFINE_BOOL(trace_parse, false, "trace parsing and preparsing")
-DEFINE_BOOL(trace_preparse, false, "trace preparsing decisions")
DEFINE_BOOL(lazy_inner_functions, true, "enable lazy parsing inner functions")
DEFINE_BOOL(aggressive_lazy_inner_functions, false,
"even lazier inner function parsing")
@@ -932,8 +959,6 @@ DEFINE_INT(sim_stack_alignment, 8,
DEFINE_INT(sim_stack_size, 2 * MB / KB,
"Stack size of the ARM64, MIPS64 and PPC64 simulator "
"in kBytes (default is 2 MB)")
-DEFINE_BOOL(log_regs_modified, true,
- "When logging register values, only print modified registers.")
DEFINE_BOOL(log_colour, ENABLE_LOG_COLOUR,
"When logging, try to use coloured output.")
DEFINE_BOOL(ignore_asm_unimplemented_break, false,
@@ -964,7 +989,7 @@ DEFINE_INT(random_seed, 0,
"(0, the default, means to use system random).")
DEFINE_INT(fuzzer_random_seed, 0,
"Default seed for initializing fuzzer random generator "
- "(0, the default, means to use system random).")
+ "(0, the default, means to use v8's random number generator seed).")
DEFINE_BOOL(trace_rail, false, "trace RAIL mode")
DEFINE_BOOL(print_all_exceptions, false,
"print exception object and stack trace on each thrown exception")
@@ -1071,7 +1096,6 @@ DEFINE_BOOL(trace_contexts, false, "trace contexts operations")
// heap.cc
DEFINE_BOOL(gc_verbose, false, "print stuff during garbage collection")
-DEFINE_BOOL(heap_stats, false, "report heap statistics before and after GC")
DEFINE_BOOL(code_stats, false, "report code statistics after GC")
DEFINE_BOOL(print_handles, false, "report handles after GC")
DEFINE_BOOL(check_handle_count, false,
@@ -1094,8 +1118,6 @@ DEFINE_BOOL(trace_lazy, false, "trace lazy compilation")
DEFINE_BOOL(collect_heap_spill_statistics, false,
"report heap spill statistics along with heap_stats "
"(requires heap_stats)")
-DEFINE_BOOL(trace_live_bytes, false,
- "trace incrementing and resetting of live bytes")
DEFINE_BOOL(trace_isolates, false, "trace isolate state changes")
// Regexp
@@ -1110,8 +1132,8 @@ DEFINE_BOOL(trace_regexp_parser, false, "trace regexp parsing")
DEFINE_BOOL(print_break_location, false, "print source location on debug break")
// wasm instance management
-DEFINE_BOOL(trace_wasm_instances, false,
- "trace creation and collection of wasm instances")
+DEFINE_DEBUG_BOOL(trace_wasm_instances, false,
+ "trace creation and collection of wasm instances")
//
// Logging and profiling flags
@@ -1126,8 +1148,6 @@ DEFINE_BOOL(log_all, false, "Log all events to the log file.")
DEFINE_BOOL(log_api, false, "Log API events to the log file.")
DEFINE_BOOL(log_code, false,
"Log code events to the log file without profiling.")
-DEFINE_BOOL(log_gc, false,
- "Log heap samples on garbage collection for the hp2ps tool.")
DEFINE_BOOL(log_handles, false, "Log global handle events.")
DEFINE_BOOL(log_suspect, false, "Log suspect operations.")
DEFINE_BOOL(log_source_code, false, "Log source code.")
@@ -1242,9 +1262,6 @@ DEFINE_IMPLICATION(print_all_code, print_code_verbose)
DEFINE_IMPLICATION(print_all_code, print_builtin_code)
DEFINE_IMPLICATION(print_all_code, print_code_stubs)
DEFINE_IMPLICATION(print_all_code, code_comments)
-#ifdef DEBUG
-DEFINE_IMPLICATION(print_all_code, trace_codegen)
-#endif
#endif
#undef FLAG
@@ -1317,6 +1334,7 @@ DEFINE_IMPLICATION(unbox_double_fields, track_double_fields)
#undef DEFINE_BOOL
#undef DEFINE_MAYBE_BOOL
+#undef DEFINE_DEBUG_BOOL
#undef DEFINE_INT
#undef DEFINE_STRING
#undef DEFINE_FLOAT
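
A rough sketch of the pattern the new DEFINE_DEBUG_BOOL macro relies on: in debug builds the flag is an ordinary mutable bool that can be flipped from the command line, while in release builds it collapses to a read-only constexpr false, so guarded tracing code is folded away. The flag name below is made up for illustration.

    #include <cstdio>

    // #define DEBUG  // uncomment to get a mutable, settable flag

    #ifdef DEBUG
    static bool FLAG_trace_widget = false;             // writable, parsed from argv
    #else
    static constexpr bool FLAG_trace_widget = false;   // read-only in release
    #endif

    void DoWork() {
      if (FLAG_trace_widget) {  // dead-stripped entirely in release builds
        std::printf("tracing widget work\n");
      }
    }

    int main() {
      DoWork();
      return 0;
    }
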
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index a51a4e7d71..693e514e94 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -5,7 +5,6 @@
#include "src/flags.h"
#include <cctype>
-#include <cerrno>
#include <cstdlib>
#include <sstream>
@@ -40,7 +39,6 @@ struct Flag {
TYPE_INT,
TYPE_UINT,
TYPE_FLOAT,
- TYPE_SIZE_T,
TYPE_STRING,
TYPE_ARGS
};
@@ -83,11 +81,6 @@ struct Flag {
return reinterpret_cast<double*>(valptr_);
}
- size_t* size_t_variable() const {
- DCHECK(type_ == TYPE_SIZE_T);
- return reinterpret_cast<size_t*>(valptr_);
- }
-
const char* string_value() const {
DCHECK(type_ == TYPE_STRING);
return *reinterpret_cast<const char**>(valptr_);
@@ -126,11 +119,6 @@ struct Flag {
return *reinterpret_cast<const double*>(defptr_);
}
- size_t size_t_default() const {
- DCHECK(type_ == TYPE_SIZE_T);
- return *reinterpret_cast<const size_t*>(defptr_);
- }
-
const char* string_default() const {
DCHECK(type_ == TYPE_STRING);
return *reinterpret_cast<const char* const *>(defptr_);
@@ -154,8 +142,6 @@ struct Flag {
return *uint_variable() == uint_default();
case TYPE_FLOAT:
return *float_variable() == float_default();
- case TYPE_SIZE_T:
- return *size_t_variable() == size_t_default();
case TYPE_STRING: {
const char* str1 = string_value();
const char* str2 = string_default();
@@ -187,9 +173,6 @@ struct Flag {
case TYPE_FLOAT:
*float_variable() = float_default();
break;
- case TYPE_SIZE_T:
- *size_t_variable() = size_t_default();
- break;
case TYPE_STRING:
set_string_value(string_default(), false);
break;
@@ -218,8 +201,6 @@ static const char* Type2String(Flag::FlagType type) {
case Flag::TYPE_UINT:
return "uint";
case Flag::TYPE_FLOAT: return "float";
- case Flag::TYPE_SIZE_T:
- return "size_t";
case Flag::TYPE_STRING: return "string";
case Flag::TYPE_ARGS: return "arguments";
}
@@ -246,9 +227,6 @@ std::ostream& operator<<(std::ostream& os, const Flag& flag) { // NOLINT
case Flag::TYPE_FLOAT:
os << *flag.float_variable();
break;
- case Flag::TYPE_SIZE_T:
- os << *flag.size_t_variable();
- break;
case Flag::TYPE_STRING: {
const char* str = flag.string_value();
os << (str ? str : "nullptr");
@@ -380,27 +358,6 @@ static Flag* FindFlag(const char* name) {
return nullptr;
}
-template <typename T>
-bool TryParseUnsigned(Flag* flag, const char* arg, const char* value,
- char** endp, T* out_val) {
- // We do not use strtoul because it accepts negative numbers.
- // Rejects values >= 2**63 when T is 64 bits wide but that
- // seems like an acceptable trade-off.
- uint64_t max = static_cast<uint64_t>(std::numeric_limits<T>::max());
- errno = 0;
- int64_t val = static_cast<int64_t>(strtoll(value, endp, 10));
- if (val < 0 || static_cast<uint64_t>(val) > max || errno != 0) {
- PrintF(stderr,
- "Error: Value for flag %s of type %s is out of bounds "
- "[0-%" PRIu64
- "]\n"
- "Try --help for options\n",
- arg, Type2String(flag->type()), max);
- return false;
- }
- *out_val = static_cast<T>(val);
- return true;
-}
// static
int FlagList::SetFlagsFromCommandLine(int* argc,
@@ -465,21 +422,27 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
case Flag::TYPE_INT:
*flag->int_variable() = static_cast<int>(strtol(value, &endp, 10));
break;
- case Flag::TYPE_UINT:
- if (!TryParseUnsigned(flag, arg, value, &endp,
- flag->uint_variable())) {
+ case Flag::TYPE_UINT: {
+ // We do not use strtoul because it accepts negative numbers.
+ int64_t val = static_cast<int64_t>(strtoll(value, &endp, 10));
+ if (val < 0 || val > std::numeric_limits<unsigned int>::max()) {
+ PrintF(stderr,
+ "Error: Value for flag %s of type %s is out of bounds "
+ "[0-%" PRIu64
+ "]\n"
+ "Try --help for options\n",
+ arg, Type2String(flag->type()),
+ static_cast<uint64_t>(
+ std::numeric_limits<unsigned int>::max()));
return_code = j;
+ break;
}
+ *flag->uint_variable() = static_cast<unsigned int>(val);
break;
+ }
case Flag::TYPE_FLOAT:
*flag->float_variable() = strtod(value, &endp);
break;
- case Flag::TYPE_SIZE_T:
- if (!TryParseUnsigned(flag, arg, value, &endp,
- flag->size_t_variable())) {
- return_code = j;
- }
- break;
case Flag::TYPE_STRING:
flag->set_string_value(value ? StrDup(value) : nullptr, true);
break;
@@ -619,10 +582,13 @@ void FlagList::PrintHelp() {
" run the new debugging shell\n\n"
"Options:\n";
- for (size_t i = 0; i < num_flags; ++i) {
- Flag* f = &flags[i];
- os << " --" << f->name() << " (" << f->comment() << ")\n"
- << " type: " << Type2String(f->type()) << " default: " << *f
+ for (const Flag& f : flags) {
+ os << " --";
+ for (const char* c = f.name(); *c != '\0'; ++c) {
+ os << NormalizeChar(*c);
+ }
+ os << " (" << f.comment() << ")\n"
+ << " type: " << Type2String(f.type()) << " default: " << f
<< "\n";
}
}
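
The inlined TYPE_UINT handling above boils down to: parse as a signed 64-bit value so a leading minus sign stays visible, then reject anything outside [0, UINT_MAX] before narrowing. A standalone sketch of that check follows; ParseUintFlag is a made-up name, not a V8 function.

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <limits>

    // Returns true and stores the result if `value` parses as a valid unsigned
    // int. strtoul is avoided because it silently accepts negative numbers.
    bool ParseUintFlag(const char* value, unsigned int* out) {
      char* endp = nullptr;
      int64_t val = static_cast<int64_t>(strtoll(value, &endp, 10));
      if (endp == value) return false;  // no digits consumed
      if (val < 0 || val > std::numeric_limits<unsigned int>::max()) {
        std::fprintf(stderr, "value %s out of bounds [0-%u]\n", value,
                     std::numeric_limits<unsigned int>::max());
        return false;
      }
      *out = static_cast<unsigned int>(val);
      return true;
    }

    int main() {
      unsigned int v = 0;
      std::printf("%d %u\n", ParseUintFlag("4096", &v), v);  // prints: 1 4096
      std::printf("%d\n", ParseUintFlag("-1", &v));          // prints: 0
      return 0;
    }
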
diff --git a/deps/v8/src/frame-constants.h b/deps/v8/src/frame-constants.h
index 8d2d1f8cc4..f042855657 100644
--- a/deps/v8/src/frame-constants.h
+++ b/deps/v8/src/frame-constants.h
@@ -217,7 +217,8 @@ class ArgumentsAdaptorFrameConstants : public TypedFrameConstants {
// FP-relative.
static const int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static const int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
- DEFINE_TYPED_FRAME_SIZES(2);
+ static const int kPaddingOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
+ DEFINE_TYPED_FRAME_SIZES(3);
};
class BuiltinFrameConstants : public TypedFrameConstants {
@@ -241,9 +242,10 @@ class ConstructFrameConstants : public TypedFrameConstants {
static const int kContextOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static const int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
static const int kConstructorOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
+ static const int kPaddingOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(3);
static const int kNewTargetOrImplicitReceiverOffset =
- TYPED_FRAME_PUSHED_VALUE_OFFSET(3);
- DEFINE_TYPED_FRAME_SIZES(4);
+ TYPED_FRAME_PUSHED_VALUE_OFFSET(4);
+ DEFINE_TYPED_FRAME_SIZES(5);
};
class BuiltinContinuationFrameConstants : public TypedFrameConstants {
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 3438c1dfb0..f5a14471ba 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -50,18 +50,22 @@ inline Address* StackFrame::ResolveReturnAddressLocation(Address* pc_address) {
}
}
+inline NativeFrame::NativeFrame(StackFrameIteratorBase* iterator)
+ : StackFrame(iterator) {}
-inline EntryFrame::EntryFrame(StackFrameIteratorBase* iterator)
- : StackFrame(iterator) {
+inline Address NativeFrame::GetCallerStackPointer() const {
+ return fp() + CommonFrameConstants::kCallerSPOffset;
}
+inline EntryFrame::EntryFrame(StackFrameIteratorBase* iterator)
+ : StackFrame(iterator) {}
+
inline ConstructEntryFrame::ConstructEntryFrame(
StackFrameIteratorBase* iterator)
: EntryFrame(iterator) {}
inline ExitFrame::ExitFrame(StackFrameIteratorBase* iterator)
- : StackFrame(iterator) {
-}
+ : StackFrame(iterator) {}
inline BuiltinExitFrame::BuiltinExitFrame(StackFrameIteratorBase* iterator)
: ExitFrame(iterator) {}
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 23713197f5..d5a04ad933 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -16,7 +16,8 @@
#include "src/string-stream.h"
#include "src/visitors.h"
#include "src/vm-state-inl.h"
-#include "src/wasm/wasm-heap.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/zone/zone-containers.h"
@@ -424,7 +425,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
Memory::Object_at(state->fp + StandardFrameConstants::kFunctionOffset);
if (!StackFrame::IsTypeMarker(marker)) {
if (maybe_function->IsSmi()) {
- return NONE;
+ return NATIVE;
} else if (IsInterpreterFramePc(iterator->isolate(),
*(state->pc_address))) {
return INTERPRETED;
@@ -439,19 +440,19 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
// than checking the flag, then getting the code, and then, if both are true
// (non-null, respectively), going down the wasm_code path.
wasm::WasmCode* wasm_code =
- iterator->isolate()->wasm_code_manager()->LookupCode(pc);
+ iterator->isolate()->wasm_engine()->code_manager()->LookupCode(pc);
if (wasm_code != nullptr) {
switch (wasm_code->kind()) {
- case wasm::WasmCode::InterpreterStub:
+ case wasm::WasmCode::kInterpreterStub:
return WASM_INTERPRETER_ENTRY;
- case wasm::WasmCode::Function:
- case wasm::WasmCode::CopiedStub:
+ case wasm::WasmCode::kFunction:
+ case wasm::WasmCode::kCopiedStub:
return WASM_COMPILED;
- case wasm::WasmCode::LazyStub:
+ case wasm::WasmCode::kLazyStub:
if (StackFrame::IsTypeMarker(marker)) break;
return BUILTIN;
- case wasm::WasmCode::WasmToJsWrapper:
- case wasm::WasmCode::WasmToWasmWrapper:
+ case wasm::WasmCode::kWasmToJsWrapper:
+ case wasm::WasmCode::kWasmToWasmWrapper:
return WASM_TO_JS;
default:
UNREACHABLE();
@@ -491,7 +492,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
break;
}
} else {
- return NONE;
+ return NATIVE;
}
}
}
@@ -519,7 +520,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
// interpreted frames, should never have a StackFrame::Type
// marker. If we find one, we're likely being called from the
// profiler in a bogus stack frame.
- return NONE;
+ return NATIVE;
}
}
@@ -541,6 +542,14 @@ Address StackFrame::UnpaddedFP() const {
return fp();
}
+void NativeFrame::ComputeCallerState(State* state) const {
+ state->sp = caller_sp();
+ state->fp = Memory::Address_at(fp() + CommonFrameConstants::kCallerFPOffset);
+ state->pc_address = ResolveReturnAddressLocation(
+ reinterpret_cast<Address*>(fp() + CommonFrameConstants::kCallerPCOffset));
+ state->callee_pc_address = nullptr;
+ state->constant_pool_address = nullptr;
+}
Code* EntryFrame::unchecked_code() const {
return isolate()->heap()->js_entry_code();
@@ -777,7 +786,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
Address inner_pointer = pc();
const wasm::WasmCode* wasm_code =
FLAG_wasm_jit_to_native
- ? isolate()->wasm_code_manager()->LookupCode(inner_pointer)
+ ? isolate()->wasm_engine()->code_manager()->LookupCode(inner_pointer)
: nullptr;
SafepointEntry safepoint_entry;
uint32_t stack_slots;
@@ -789,7 +798,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
wasm_code->stack_slots());
safepoint_entry = table.FindEntry(inner_pointer);
stack_slots = wasm_code->stack_slots();
- has_tagged_params = wasm_code->kind() != wasm::WasmCode::Function;
+ has_tagged_params = wasm_code->kind() != wasm::WasmCode::kFunction;
} else {
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
isolate()->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
@@ -840,6 +849,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
// in the place on the stack that one finds the frame type.
UNREACHABLE();
break;
+ case NATIVE:
case NONE:
case NUMBER_OF_TYPES:
case MANUAL:
@@ -1236,26 +1246,26 @@ WASM_SUMMARY_DISPATCH(int, byte_offset)
#undef WASM_SUMMARY_DISPATCH
int FrameSummary::WasmFrameSummary::SourcePosition() const {
- Handle<WasmCompiledModule> compiled_module(wasm_instance()->compiled_module(),
- isolate());
- return WasmCompiledModule::GetSourcePosition(compiled_module,
- function_index(), byte_offset(),
- at_to_number_conversion());
+ Handle<WasmSharedModuleData> shared(
+ wasm_instance()->compiled_module()->shared(), isolate());
+ return WasmSharedModuleData::GetSourcePosition(
+ shared, function_index(), byte_offset(), at_to_number_conversion());
}
Handle<Script> FrameSummary::WasmFrameSummary::script() const {
- return handle(wasm_instance()->compiled_module()->script());
+ return handle(wasm_instance()->compiled_module()->shared()->script());
}
Handle<String> FrameSummary::WasmFrameSummary::FunctionName() const {
- Handle<WasmCompiledModule> compiled_module(
- wasm_instance()->compiled_module());
- return WasmCompiledModule::GetFunctionName(compiled_module->GetIsolate(),
- compiled_module, function_index());
+ Handle<WasmSharedModuleData> shared(
+ wasm_instance()->compiled_module()->shared(), isolate());
+ return WasmSharedModuleData::GetFunctionName(isolate(), shared,
+ function_index());
}
Handle<Context> FrameSummary::WasmFrameSummary::native_context() const {
- return wasm_instance()->compiled_module()->native_context();
+ return handle(wasm_instance()->compiled_module()->native_context(),
+ isolate());
}
FrameSummary::WasmCompiledFrameSummary::WasmCompiledFrameSummary(
@@ -1535,6 +1545,7 @@ void OptimizedFrame::GetFunctions(
DCHECK_EQ(Translation::BEGIN, opcode);
it.Next(); // Skip frame count.
int jsframe_count = it.Next();
+ it.Next(); // Skip update feedback count.
// We insert the frames in reverse order because the frames
// in the deoptimization translation are ordered bottom-to-top.
@@ -1691,11 +1702,18 @@ void WasmCompiledFrame::Print(StringStream* accumulator, PrintMode mode,
accumulator->Add("WASM [");
Script* script = this->script();
accumulator->PrintName(script->name());
- int pc = static_cast<int>(this->pc() - LookupCode()->instruction_start());
- Object* instance = this->wasm_instance();
+ Address instruction_start = FLAG_wasm_jit_to_native
+ ? isolate()
+ ->wasm_engine()
+ ->code_manager()
+ ->LookupCode(pc())
+ ->instructions()
+ .start()
+ : LookupCode()->instruction_start();
+ int pc = static_cast<int>(this->pc() - instruction_start);
+ WasmSharedModuleData* shared = wasm_instance()->compiled_module()->shared();
Vector<const uint8_t> raw_func_name =
- WasmInstanceObject::cast(instance)->compiled_module()->GetRawFunctionName(
- this->function_index());
+ shared->GetRawFunctionName(this->function_index());
const int kMaxPrintedFunctionName = 64;
char func_name[kMaxPrintedFunctionName + 1];
int func_name_len = std::min(kMaxPrintedFunctionName, raw_func_name.length());
@@ -1718,11 +1736,18 @@ Address WasmCompiledFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPOffset;
}
+WasmCodeWrapper WasmCompiledFrame::wasm_code() const {
+ return FLAG_wasm_jit_to_native
+ ? WasmCodeWrapper(
+ isolate()->wasm_engine()->code_manager()->LookupCode(pc()))
+ : WasmCodeWrapper(Handle<Code>(LookupCode(), isolate()));
+}
+
WasmInstanceObject* WasmCompiledFrame::wasm_instance() const {
WasmInstanceObject* obj =
FLAG_wasm_jit_to_native
? WasmInstanceObject::GetOwningInstance(
- isolate()->wasm_code_manager()->LookupCode(pc()))
+ isolate()->wasm_engine()->code_manager()->LookupCode(pc()))
: WasmInstanceObject::GetOwningInstanceGC(LookupCode());
// This is a live stack frame; it must have a live instance.
DCHECK_NOT_NULL(obj);
@@ -1734,7 +1759,7 @@ uint32_t WasmCompiledFrame::function_index() const {
}
Script* WasmCompiledFrame::script() const {
- return wasm_instance()->compiled_module()->script();
+ return wasm_instance()->compiled_module()->shared()->script();
}
int WasmCompiledFrame::position() const {
@@ -1743,25 +1768,9 @@ int WasmCompiledFrame::position() const {
void WasmCompiledFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
- WasmCodeWrapper code;
- Handle<WasmInstanceObject> instance;
- int offset = -1;
- if (FLAG_wasm_jit_to_native) {
- code = WasmCodeWrapper(isolate()->wasm_code_manager()->LookupCode(pc()));
- offset =
- static_cast<int>(pc() - code.GetWasmCode()->instructions().start());
- instance = Handle<WasmInstanceObject>(
- WasmInstanceObject::cast(code.GetWasmCode()
- ->owner()
- ->compiled_module()
- ->weak_owning_instance()
- ->value()),
- isolate());
- } else {
- code = WasmCodeWrapper(Handle<Code>(LookupCode(), isolate()));
- offset = static_cast<int>(pc() - code.GetCode()->instruction_start());
- instance = Handle<WasmInstanceObject>(wasm_instance(), isolate());
- }
+ WasmCodeWrapper code = wasm_code();
+ int offset = static_cast<int>(pc() - code.instructions().start());
+ Handle<WasmInstanceObject> instance = code.wasm_instance();
FrameSummary::WasmCompiledFrameSummary summary(
isolate(), instance, code, offset, at_to_number_conversion());
functions->push_back(summary);
@@ -1774,9 +1783,10 @@ bool WasmCompiledFrame::at_to_number_conversion() const {
int pos = -1;
if (FLAG_wasm_jit_to_native) {
wasm::WasmCode* code =
- callee_pc ? isolate()->wasm_code_manager()->LookupCode(callee_pc)
- : nullptr;
- if (!code || code->kind() != wasm::WasmCode::WasmToJsWrapper) return false;
+ callee_pc
+ ? isolate()->wasm_engine()->code_manager()->LookupCode(callee_pc)
+ : nullptr;
+ if (!code || code->kind() != wasm::WasmCode::kWasmToJsWrapper) return false;
int offset = static_cast<int>(callee_pc - code->instructions().start());
pos = FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(code,
offset);
@@ -1800,11 +1810,11 @@ int WasmCompiledFrame::LookupExceptionHandlerInTable(int* stack_slots) {
*stack_slots = code->stack_slots();
return table->LookupReturn(pc_offset);
}
- wasm::WasmCode* code = isolate()->wasm_code_manager()->LookupCode(pc());
+ wasm::WasmCode* code =
+ isolate()->wasm_engine()->code_manager()->LookupCode(pc());
if (!code->IsAnonymous()) {
Object* table_entry =
- code->owner()->compiled_module()->ptr_to_handler_table()->get(
- code->index());
+ code->owner()->compiled_module()->handler_table()->get(code->index());
if (table_entry->IsHandlerTable()) {
HandlerTable* table = HandlerTable::cast(table_entry);
int pc_offset = static_cast<int>(pc() - code->instructions().start());
@@ -1854,7 +1864,7 @@ WasmInstanceObject* WasmInterpreterEntryFrame::wasm_instance() const {
WasmInstanceObject* ret =
FLAG_wasm_jit_to_native
? WasmInstanceObject::GetOwningInstance(
- isolate()->wasm_code_manager()->LookupCode(pc()))
+ isolate()->wasm_engine()->code_manager()->LookupCode(pc()))
: WasmInstanceObject::GetOwningInstanceGC(LookupCode());
// This is a live stack frame; there must be a live wasm instance available.
DCHECK_NOT_NULL(ret);
@@ -1862,7 +1872,7 @@ WasmInstanceObject* WasmInterpreterEntryFrame::wasm_instance() const {
}
Script* WasmInterpreterEntryFrame::script() const {
- return wasm_instance()->compiled_module()->script();
+ return wasm_instance()->compiled_module()->shared()->script();
}
int WasmInterpreterEntryFrame::position() const {
@@ -1870,7 +1880,7 @@ int WasmInterpreterEntryFrame::position() const {
}
Object* WasmInterpreterEntryFrame::context() const {
- return wasm_instance()->compiled_module()->ptr_to_native_context();
+ return wasm_instance()->compiled_module()->native_context();
}
Address WasmInterpreterEntryFrame::GetCallerStackPointer() const {
@@ -2081,10 +2091,11 @@ void JavaScriptFrame::Iterate(RootVisitor* v) const {
void InternalFrame::Iterate(RootVisitor* v) const {
wasm::WasmCode* wasm_code =
- FLAG_wasm_jit_to_native ? isolate()->wasm_code_manager()->LookupCode(pc())
- : nullptr;
+ FLAG_wasm_jit_to_native
+ ? isolate()->wasm_engine()->code_manager()->LookupCode(pc())
+ : nullptr;
if (wasm_code != nullptr) {
- DCHECK(wasm_code->kind() == wasm::WasmCode::LazyStub);
+ DCHECK(wasm_code->kind() == wasm::WasmCode::kLazyStub);
} else {
Code* code = LookupCode();
IteratePc(v, pc_address(), constant_pool_address(), code);
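
The frames.cc changes keep repeating one pattern: when --wasm-jit-to-native is on, wasm code is found through the engine's code manager; otherwise it still lives on the GC heap, and the result is handed around in a small wrapper so callers don't care which backend produced it. A stripped-down sketch of that idea, with illustrative type and member names rather than V8's:

    #include <cassert>
    #include <cstdint>

    struct NativeWasmCode { uintptr_t instruction_start; };  // off-heap code
    struct HeapCode       { uintptr_t instruction_start; };  // GC-managed code

    // Holds exactly one of the two representations; callers just ask for
    // instruction_start, mirroring the role of WasmCodeWrapper above.
    class CodeWrapper {
     public:
      explicit CodeWrapper(const NativeWasmCode* native)
          : native_(native), heap_(nullptr) {}
      explicit CodeWrapper(const HeapCode* heap) : native_(nullptr), heap_(heap) {}

      uintptr_t instruction_start() const {
        return native_ ? native_->instruction_start : heap_->instruction_start;
      }

     private:
      const NativeWasmCode* native_;
      const HeapCode* heap_;
    };

    int main() {
      bool flag_wasm_jit_to_native = true;  // stand-in for FLAG_wasm_jit_to_native
      NativeWasmCode native{0x1000};
      HeapCode heap{0x2000};
      CodeWrapper code = flag_wasm_jit_to_native ? CodeWrapper(&native)
                                                 : CodeWrapper(&heap);
      assert(code.instruction_start() == 0x1000);
      return 0;
    }
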
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index e21d62764b..0c988770f6 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -64,8 +64,9 @@ class InnerPointerToCodeCache {
class StackHandlerConstants : public AllStatic {
public:
static const int kNextOffset = 0 * kPointerSize;
+ static const int kPaddingOffset = 1 * kPointerSize;
- static const int kSize = kNextOffset + kPointerSize;
+ static const int kSize = kPaddingOffset + kPointerSize;
static const int kSlotCount = kSize >> kPointerSizeLog2;
};
@@ -104,7 +105,8 @@ class StackHandler BASE_EMBEDDED {
V(CONSTRUCT, ConstructFrame) \
V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame) \
V(BUILTIN, BuiltinFrame) \
- V(BUILTIN_EXIT, BuiltinExitFrame)
+ V(BUILTIN_EXIT, BuiltinExitFrame) \
+ V(NATIVE, NativeFrame)
// Abstract base class for all stack frames.
class StackFrame BASE_EMBEDDED {
@@ -180,8 +182,7 @@ class StackFrame BASE_EMBEDDED {
// and should be converted back to a stack frame type using MarkerToType.
// Otherwise, the value is a tagged function pointer.
static bool IsTypeMarker(intptr_t function_or_marker) {
- bool is_marker = ((function_or_marker & kSmiTagMask) == kSmiTag);
- return is_marker;
+ return (function_or_marker & kSmiTagMask) == kSmiTag;
}
// Copy constructor; it breaks the connection to host iterator
@@ -328,6 +329,25 @@ class StackFrame BASE_EMBEDDED {
friend class SafeStackFrameIterator;
};
+class NativeFrame : public StackFrame {
+ public:
+ Type type() const override { return NATIVE; }
+
+ Code* unchecked_code() const override { return nullptr; }
+
+ // Garbage collection support.
+ void Iterate(RootVisitor* v) const override {}
+
+ protected:
+ inline explicit NativeFrame(StackFrameIteratorBase* iterator);
+
+ Address GetCallerStackPointer() const override;
+
+ private:
+ void ComputeCallerState(State* state) const override;
+
+ friend class StackFrameIteratorBase;
+};
// Entry frames are used to enter JavaScript execution from C.
class EntryFrame: public StackFrame {
@@ -949,6 +969,7 @@ class WasmCompiledFrame final : public StandardFrame {
// Accessors.
WasmInstanceObject* wasm_instance() const;
+ WasmCodeWrapper wasm_code() const;
uint32_t function_index() const;
Script* script() const override;
int position() const override;
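
The simplified IsTypeMarker above leans on the usual smi-tagging trick: a frame-type marker is stored as a smi (low bit 0), while a real tagged JSFunction pointer carries the heap-object tag (low bit 1), so a single mask-and-compare distinguishes them. A self-contained sketch with the tag values written out; the sample addresses are fabricated.

    #include <cassert>
    #include <cstdint>

    constexpr intptr_t kSmiTag = 0;
    constexpr intptr_t kSmiTagMask = 1;
    constexpr intptr_t kHeapObjectTag = 1;

    constexpr bool IsTypeMarker(intptr_t function_or_marker) {
      return (function_or_marker & kSmiTagMask) == kSmiTag;
    }

    int main() {
      intptr_t marker = 4 << 1;                      // frame type 4, stored as a smi
      intptr_t function = 0x7008 | kHeapObjectTag;   // fake tagged JSFunction pointer
      assert(IsTypeMarker(marker));
      assert(!IsTypeMarker(function));
      return 0;
    }
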
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index fc35100a30..de56faa4fd 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -222,7 +222,7 @@ class MachOSection : public DebugSectionBase<MachOSectionHeader> {
public:
enum Type {
S_REGULAR = 0x0u,
- S_ATTR_COALESCED = 0xbu,
+ S_ATTR_COALESCED = 0xBu,
S_ATTR_SOME_INSTRUCTIONS = 0x400u,
S_ATTR_DEBUG = 0x02000000u,
S_ATTR_PURE_INSTRUCTIONS = 0x80000000u
@@ -297,9 +297,9 @@ class ELFSection : public DebugSectionBase<ELFSectionHeader> {
TYPE_DYNSYM = 11,
TYPE_LOPROC = 0x70000000,
TYPE_X86_64_UNWIND = 0x70000001,
- TYPE_HIPROC = 0x7fffffff,
+ TYPE_HIPROC = 0x7FFFFFFF,
TYPE_LOUSER = 0x80000000,
- TYPE_HIUSER = 0xffffffff
+ TYPE_HIUSER = 0xFFFFFFFF
};
enum Flags {
@@ -308,9 +308,7 @@ class ELFSection : public DebugSectionBase<ELFSectionHeader> {
FLAG_EXEC = 4
};
- enum SpecialIndexes {
- INDEX_ABSOLUTE = 0xfff1
- };
+ enum SpecialIndexes { INDEX_ABSOLUTE = 0xFFF1 };
ELFSection(const char* name, Type type, uintptr_t align)
: name_(name), type_(type), align_(align) { }
@@ -650,20 +648,20 @@ class ELF BASE_EMBEDDED {
Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT))
- const uint8_t ident[16] =
- { 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ const uint8_t ident[16] = {0x7F, 'E', 'L', 'F', 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
#elif(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT) || \
(V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
- const uint8_t ident[16] =
- { 0x7f, 'E', 'L', 'F', 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ const uint8_t ident[16] = {0x7F, 'E', 'L', 'F', 2, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
#elif V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN && V8_OS_LINUX
- const uint8_t ident[16] = {0x7f, 'E', 'L', 'F', 2, 2, 1, 0,
+ const uint8_t ident[16] = {0x7F, 'E', 'L', 'F', 2, 2, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0};
#elif V8_TARGET_ARCH_S390X
- const uint8_t ident[16] = {0x7f, 'E', 'L', 'F', 2, 2, 1, 3,
+ const uint8_t ident[16] = {0x7F, 'E', 'L', 'F', 2, 2, 1, 3,
0, 0, 0, 0, 0, 0, 0, 0};
#elif V8_TARGET_ARCH_S390
- const uint8_t ident[16] = {0x7f, 'E', 'L', 'F', 1, 2, 1, 3,
+ const uint8_t ident[16] = {0x7F, 'E', 'L', 'F', 1, 2, 1, 3,
0, 0, 0, 0, 0, 0, 0, 0};
#else
#error Unsupported target architecture.
@@ -1089,12 +1087,12 @@ class DebugInfoSection : public DebugSection {
DW_OP_reg7 = 0x57,
DW_OP_reg8 = 0x58,
DW_OP_reg9 = 0x59,
- DW_OP_reg10 = 0x5a,
- DW_OP_reg11 = 0x5b,
- DW_OP_reg12 = 0x5c,
- DW_OP_reg13 = 0x5d,
- DW_OP_reg14 = 0x5e,
- DW_OP_reg15 = 0x5f,
+ DW_OP_reg10 = 0x5A,
+ DW_OP_reg11 = 0x5B,
+ DW_OP_reg12 = 0x5C,
+ DW_OP_reg13 = 0x5D,
+ DW_OP_reg14 = 0x5E,
+ DW_OP_reg15 = 0x5F,
DW_OP_reg16 = 0x60,
DW_OP_reg17 = 0x61,
DW_OP_reg18 = 0x62,
@@ -1105,12 +1103,12 @@ class DebugInfoSection : public DebugSection {
DW_OP_reg23 = 0x67,
DW_OP_reg24 = 0x68,
DW_OP_reg25 = 0x69,
- DW_OP_reg26 = 0x6a,
- DW_OP_reg27 = 0x6b,
- DW_OP_reg28 = 0x6c,
- DW_OP_reg29 = 0x6d,
- DW_OP_reg30 = 0x6e,
- DW_OP_reg31 = 0x6f,
+ DW_OP_reg26 = 0x6A,
+ DW_OP_reg27 = 0x6B,
+ DW_OP_reg28 = 0x6C,
+ DW_OP_reg29 = 0x6D,
+ DW_OP_reg30 = 0x6E,
+ DW_OP_reg31 = 0x6F,
DW_OP_fbreg = 0x91 // 1 param: SLEB128 offset
};
@@ -1286,11 +1284,11 @@ class DebugAbbrevSection : public DebugSection {
// DWARF2 standard, figure 14.
enum DWARF2Tags {
DW_TAG_FORMAL_PARAMETER = 0x05,
- DW_TAG_POINTER_TYPE = 0xf,
+ DW_TAG_POINTER_TYPE = 0xF,
DW_TAG_COMPILE_UNIT = 0x11,
DW_TAG_STRUCTURE_TYPE = 0x13,
DW_TAG_BASE_TYPE = 0x24,
- DW_TAG_SUBPROGRAM = 0x2e,
+ DW_TAG_SUBPROGRAM = 0x2E,
DW_TAG_VARIABLE = 0x34
};
@@ -1304,11 +1302,11 @@ class DebugAbbrevSection : public DebugSection {
enum DWARF2Attribute {
DW_AT_LOCATION = 0x2,
DW_AT_NAME = 0x3,
- DW_AT_BYTE_SIZE = 0xb,
+ DW_AT_BYTE_SIZE = 0xB,
DW_AT_STMT_LIST = 0x10,
DW_AT_LOW_PC = 0x11,
DW_AT_HIGH_PC = 0x12,
- DW_AT_ENCODING = 0x3e,
+ DW_AT_ENCODING = 0x3E,
DW_AT_FRAME_BASE = 0x40,
DW_AT_TYPE = 0x49
};
@@ -1320,8 +1318,8 @@ class DebugAbbrevSection : public DebugSection {
DW_FORM_STRING = 0x8,
DW_FORM_DATA4 = 0x6,
DW_FORM_BLOCK = 0x9,
- DW_FORM_DATA1 = 0xb,
- DW_FORM_FLAG = 0xc,
+ DW_FORM_DATA1 = 0xB,
+ DW_FORM_FLAG = 0xC,
DW_FORM_REF4 = 0x13
};
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index c101877a6f..7845d71fb1 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -262,7 +262,7 @@ class GlobalHandles::Node {
}
// Zap with something dangerous.
- *location() = reinterpret_cast<Object*>(0x6057ca11);
+ *location() = reinterpret_cast<Object*>(0x6057CA11);
typedef v8::WeakCallbackInfo<void> Data;
auto callback = reinterpret_cast<Data::Callback>(weak_callback_);
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 8f5253016f..bc28181db1 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -98,7 +98,7 @@ namespace internal {
#endif
// Minimum stack size in KB required by compilers.
-const int kStackSpaceRequiredForCompilation = 40;
+constexpr int kStackSpaceRequiredForCompilation = 40;
// Determine whether double field unboxing feature is enabled.
#if V8_TARGET_ARCH_64_BIT
@@ -131,105 +131,106 @@ typedef byte* Address;
// -----------------------------------------------------------------------------
// Constants
-const int KB = 1024;
-const int MB = KB * KB;
-const int GB = KB * KB * KB;
-const int kMaxInt = 0x7FFFFFFF;
-const int kMinInt = -kMaxInt - 1;
-const int kMaxInt8 = (1 << 7) - 1;
-const int kMinInt8 = -(1 << 7);
-const int kMaxUInt8 = (1 << 8) - 1;
-const int kMinUInt8 = 0;
-const int kMaxInt16 = (1 << 15) - 1;
-const int kMinInt16 = -(1 << 15);
-const int kMaxUInt16 = (1 << 16) - 1;
-const int kMinUInt16 = 0;
-
-const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
-const int kMinUInt32 = 0;
-
-const int kUInt8Size = sizeof(uint8_t);
-const int kCharSize = sizeof(char);
-const int kShortSize = sizeof(short); // NOLINT
-const int kUInt16Size = sizeof(uint16_t);
-const int kIntSize = sizeof(int);
-const int kInt32Size = sizeof(int32_t);
-const int kInt64Size = sizeof(int64_t);
-const int kUInt32Size = sizeof(uint32_t);
-const int kSizetSize = sizeof(size_t);
-const int kFloatSize = sizeof(float);
-const int kDoubleSize = sizeof(double);
-const int kIntptrSize = sizeof(intptr_t);
-const int kUIntptrSize = sizeof(uintptr_t);
-const int kPointerSize = sizeof(void*);
+constexpr int KB = 1024;
+constexpr int MB = KB * KB;
+constexpr int GB = KB * KB * KB;
+constexpr int kMaxInt = 0x7FFFFFFF;
+constexpr int kMinInt = -kMaxInt - 1;
+constexpr int kMaxInt8 = (1 << 7) - 1;
+constexpr int kMinInt8 = -(1 << 7);
+constexpr int kMaxUInt8 = (1 << 8) - 1;
+constexpr int kMinUInt8 = 0;
+constexpr int kMaxInt16 = (1 << 15) - 1;
+constexpr int kMinInt16 = -(1 << 15);
+constexpr int kMaxUInt16 = (1 << 16) - 1;
+constexpr int kMinUInt16 = 0;
+
+constexpr uint32_t kMaxUInt32 = 0xFFFFFFFFu;
+constexpr int kMinUInt32 = 0;
+
+constexpr int kUInt8Size = sizeof(uint8_t);
+constexpr int kCharSize = sizeof(char);
+constexpr int kShortSize = sizeof(short); // NOLINT
+constexpr int kUInt16Size = sizeof(uint16_t);
+constexpr int kIntSize = sizeof(int);
+constexpr int kInt32Size = sizeof(int32_t);
+constexpr int kInt64Size = sizeof(int64_t);
+constexpr int kUInt32Size = sizeof(uint32_t);
+constexpr int kSizetSize = sizeof(size_t);
+constexpr int kFloatSize = sizeof(float);
+constexpr int kDoubleSize = sizeof(double);
+constexpr int kIntptrSize = sizeof(intptr_t);
+constexpr int kUIntptrSize = sizeof(uintptr_t);
+constexpr int kPointerSize = sizeof(void*);
#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
-const int kRegisterSize = kPointerSize + kPointerSize;
+constexpr int kRegisterSize = kPointerSize + kPointerSize;
#else
-const int kRegisterSize = kPointerSize;
+constexpr int kRegisterSize = kPointerSize;
#endif
-const int kPCOnStackSize = kRegisterSize;
-const int kFPOnStackSize = kRegisterSize;
+constexpr int kPCOnStackSize = kRegisterSize;
+constexpr int kFPOnStackSize = kRegisterSize;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
-const int kElidedFrameSlots = kPCOnStackSize / kPointerSize;
+constexpr int kElidedFrameSlots = kPCOnStackSize / kPointerSize;
#else
-const int kElidedFrameSlots = 0;
+constexpr int kElidedFrameSlots = 0;
#endif
-const int kDoubleSizeLog2 = 3;
-const size_t kMaxWasmCodeMemory = 256 * MB;
+constexpr int kDoubleSizeLog2 = 3;
+constexpr size_t kMaxWasmCodeMemory = 256 * MB;
#if V8_HOST_ARCH_64_BIT
-const int kPointerSizeLog2 = 3;
-const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
-const uintptr_t kUintptrAllBitsSet = V8_UINT64_C(0xFFFFFFFFFFFFFFFF);
-const bool kRequiresCodeRange = true;
+constexpr int kPointerSizeLog2 = 3;
+constexpr intptr_t kIntptrSignBit =
+ static_cast<intptr_t>(uintptr_t{0x8000000000000000});
+constexpr uintptr_t kUintptrAllBitsSet = uintptr_t{0xFFFFFFFFFFFFFFFF};
+constexpr bool kRequiresCodeRange = true;
#if V8_TARGET_ARCH_MIPS64
// To use pseudo-relative jumps such as j/jal instructions which have 28-bit
// encoded immediate, the addresses have to be in range of 256MB aligned
// region. Used only for large object space.
-const size_t kMaximalCodeRangeSize = 256 * MB;
-const size_t kCodeRangeAreaAlignment = 256 * MB;
+constexpr size_t kMaximalCodeRangeSize = 256 * MB;
+constexpr size_t kCodeRangeAreaAlignment = 256 * MB;
#elif V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
-const size_t kMaximalCodeRangeSize = 512 * MB;
-const size_t kCodeRangeAreaAlignment = 64 * KB; // OS page on PPC Linux
+constexpr size_t kMaximalCodeRangeSize = 512 * MB;
+constexpr size_t kCodeRangeAreaAlignment = 64 * KB; // OS page on PPC Linux
#else
-const size_t kMaximalCodeRangeSize = 512 * MB;
-const size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
+constexpr size_t kMaximalCodeRangeSize = 512 * MB;
+constexpr size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
#endif
#if V8_OS_WIN
-const size_t kMinimumCodeRangeSize = 4 * MB;
-const size_t kReservedCodeRangePages = 1;
+constexpr size_t kMinimumCodeRangeSize = 4 * MB;
+constexpr size_t kReservedCodeRangePages = 1;
#else
-const size_t kMinimumCodeRangeSize = 3 * MB;
-const size_t kReservedCodeRangePages = 0;
+constexpr size_t kMinimumCodeRangeSize = 3 * MB;
+constexpr size_t kReservedCodeRangePages = 0;
#endif
#else
-const int kPointerSizeLog2 = 2;
-const intptr_t kIntptrSignBit = 0x80000000;
-const uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
+constexpr int kPointerSizeLog2 = 2;
+constexpr intptr_t kIntptrSignBit = 0x80000000;
+constexpr uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
// x32 port also requires code range.
-const bool kRequiresCodeRange = true;
-const size_t kMaximalCodeRangeSize = 256 * MB;
-const size_t kMinimumCodeRangeSize = 3 * MB;
-const size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
+constexpr bool kRequiresCodeRange = true;
+constexpr size_t kMaximalCodeRangeSize = 256 * MB;
+constexpr size_t kMinimumCodeRangeSize = 3 * MB;
+constexpr size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
#elif V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
-const bool kRequiresCodeRange = false;
-const size_t kMaximalCodeRangeSize = 0 * MB;
-const size_t kMinimumCodeRangeSize = 0 * MB;
-const size_t kCodeRangeAreaAlignment = 64 * KB; // OS page on PPC Linux
+constexpr bool kRequiresCodeRange = false;
+constexpr size_t kMaximalCodeRangeSize = 0 * MB;
+constexpr size_t kMinimumCodeRangeSize = 0 * MB;
+constexpr size_t kCodeRangeAreaAlignment = 64 * KB; // OS page on PPC Linux
#else
-const bool kRequiresCodeRange = false;
-const size_t kMaximalCodeRangeSize = 0 * MB;
-const size_t kMinimumCodeRangeSize = 0 * MB;
-const size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
+constexpr bool kRequiresCodeRange = false;
+constexpr size_t kMaximalCodeRangeSize = 0 * MB;
+constexpr size_t kMinimumCodeRangeSize = 0 * MB;
+constexpr size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
#endif
-const size_t kReservedCodeRangePages = 0;
+constexpr size_t kReservedCodeRangePages = 0;
#endif
// Trigger an incremental GCs once the external memory reaches this limit.
-const int kExternalAllocationSoftLimit = 64 * MB;
+constexpr int kExternalAllocationSoftLimit = 64 * MB;
// Maximum object size that gets allocated into regular pages. Objects larger
// than that size are allocated in large object space and are never moved in
@@ -238,39 +239,39 @@ const int kExternalAllocationSoftLimit = 64 * MB;
// account.
//
// Current value: Page::kAllocatableMemory (on 32-bit arch) - 512 (slack).
-const int kMaxRegularHeapObjectSize = 507136;
+constexpr int kMaxRegularHeapObjectSize = 507136;
STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
-const int kBitsPerByte = 8;
-const int kBitsPerByteLog2 = 3;
-const int kBitsPerPointer = kPointerSize * kBitsPerByte;
-const int kBitsPerInt = kIntSize * kBitsPerByte;
+constexpr int kBitsPerByte = 8;
+constexpr int kBitsPerByteLog2 = 3;
+constexpr int kBitsPerPointer = kPointerSize * kBitsPerByte;
+constexpr int kBitsPerInt = kIntSize * kBitsPerByte;
// IEEE 754 single precision floating point number bit layout.
-const uint32_t kBinary32SignMask = 0x80000000u;
-const uint32_t kBinary32ExponentMask = 0x7f800000u;
-const uint32_t kBinary32MantissaMask = 0x007fffffu;
-const int kBinary32ExponentBias = 127;
-const int kBinary32MaxExponent = 0xFE;
-const int kBinary32MinExponent = 0x01;
-const int kBinary32MantissaBits = 23;
-const int kBinary32ExponentShift = 23;
+constexpr uint32_t kBinary32SignMask = 0x80000000u;
+constexpr uint32_t kBinary32ExponentMask = 0x7f800000u;
+constexpr uint32_t kBinary32MantissaMask = 0x007fffffu;
+constexpr int kBinary32ExponentBias = 127;
+constexpr int kBinary32MaxExponent = 0xFE;
+constexpr int kBinary32MinExponent = 0x01;
+constexpr int kBinary32MantissaBits = 23;
+constexpr int kBinary32ExponentShift = 23;
// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
// other bits set.
-const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
+constexpr uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
// Latin1/UTF-16 constants
// Code-point values in Unicode 4.0 are 21 bits wide.
// Code units in UTF-16 are 16 bits wide.
typedef uint16_t uc16;
typedef int32_t uc32;
-const int kOneByteSize = kCharSize;
-const int kUC16Size = sizeof(uc16); // NOLINT
+constexpr int kOneByteSize = kCharSize;
+constexpr int kUC16Size = sizeof(uc16); // NOLINT
// 128 bit SIMD value size.
-const int kSimd128Size = 16;
+constexpr int kSimd128Size = 16;
// Round up n to be a multiple of sz, where sz is a power of 2.
#define ROUND_UP(n, sz) (((n) + ((sz) - 1)) & ~((sz) - 1))
@@ -354,10 +355,10 @@ inline LanguageMode stricter_language_mode(LanguageMode mode1,
enum TypeofMode : int { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// This constant is used as an undefined value when passing source positions.
-const int kNoSourcePosition = -1;
+constexpr int kNoSourcePosition = -1;
// This constant is used to indicate missing deoptimization information.
-const int kNoDeoptimizationId = -1;
+constexpr int kNoDeoptimizationId = -1;
// Deoptimize bailout kind.
enum class DeoptimizeKind : uint8_t { kEager, kSoft, kLazy };
@@ -392,63 +393,53 @@ inline std::ostream& operator<<(std::ostream& os,
}
// Mask for the sign bit in a smi.
-const intptr_t kSmiSignMask = kIntptrSignBit;
+constexpr intptr_t kSmiSignMask = kIntptrSignBit;
-const int kObjectAlignmentBits = kPointerSizeLog2;
-const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
-const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
+constexpr int kObjectAlignmentBits = kPointerSizeLog2;
+constexpr intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
+constexpr intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
// Desired alignment for pointers.
-const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
-const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
+constexpr intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
+constexpr intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
// Desired alignment for double values.
-const intptr_t kDoubleAlignment = 8;
-const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
+constexpr intptr_t kDoubleAlignment = 8;
+constexpr intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
// Desired alignment for generated code is 32 bytes (to improve cache line
// utilization).
-const int kCodeAlignmentBits = 5;
-const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
-const intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
-
-// The owner field of a page is tagged with the page header tag. We need that
-// to find out if a slot is part of a large object. If we mask out the lower
-// 0xfffff bits (1M pages), go to the owner offset, and see that this field
-// is tagged with the page header tag, we can just look up the owner.
-// Otherwise, we know that we are somewhere (not within the first 1M) in a
-// large object.
-const int kPageHeaderTag = 3;
-const int kPageHeaderTagSize = 2;
-const intptr_t kPageHeaderTagMask = (1 << kPageHeaderTagSize) - 1;
+constexpr int kCodeAlignmentBits = 5;
+constexpr intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
+constexpr intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
+// Weak references are tagged using the second bit in a pointer.
+constexpr int kWeakReferenceTag = 3;
+constexpr int kWeakReferenceTagSize = 2;
+constexpr intptr_t kWeakReferenceTagMask = (1 << kWeakReferenceTagSize) - 1;
// Zap-value: The value used for zapping dead objects.
// Should be a recognizable hex value tagged as a failure.
#ifdef V8_HOST_ARCH_64_BIT
-const Address kZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeef));
-const Address kHandleZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddeaf));
-const Address kGlobalHandleZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0x1baffed00baffedf));
-const Address kFromSpaceZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf));
-const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
-const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef);
-const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
+constexpr uint64_t kZapValue = uint64_t{0xdeadbeedbeadbeef};
+constexpr uint64_t kHandleZapValue = uint64_t{0x1baddead0baddeaf};
+constexpr uint64_t kGlobalHandleZapValue = uint64_t{0x1baffed00baffedf};
+constexpr uint64_t kFromSpaceZapValue = uint64_t{0x1beefdad0beefdaf};
+constexpr uint64_t kDebugZapValue = uint64_t{0xbadbaddbbadbaddb};
+constexpr uint64_t kSlotsZapValue = uint64_t{0xbeefdeadbeefdeef};
+constexpr uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
#else
-const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef);
-const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf);
-const Address kGlobalHandleZapValue = reinterpret_cast<Address>(0xbaffedf);
-const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf);
-const uint32_t kSlotsZapValue = 0xbeefdeef;
-const uint32_t kDebugZapValue = 0xbadbaddb;
-const uint32_t kFreeListZapValue = 0xfeed1eaf;
+constexpr uint32_t kZapValue = 0xdeadbeef;
+constexpr uint32_t kHandleZapValue = 0xbaddeaf;
+constexpr uint32_t kGlobalHandleZapValue = 0xbaffedf;
+constexpr uint32_t kFromSpaceZapValue = 0xbeefdaf;
+constexpr uint32_t kSlotsZapValue = 0xbeefdeef;
+constexpr uint32_t kDebugZapValue = 0xbadbaddb;
+constexpr uint32_t kFreeListZapValue = 0xfeed1eaf;
#endif
-const int kCodeZapValue = 0xbadc0de;
-const uint32_t kPhantomReferenceZap = 0xca11bac;
+constexpr int kCodeZapValue = 0xbadc0de;
+constexpr uint32_t kPhantomReferenceZap = 0xca11bac;
// On Intel architecture, cache line size is 64 bytes.
// On ARM it may be less (32 bytes), but as far as this constant is
@@ -457,8 +448,7 @@ const uint32_t kPhantomReferenceZap = 0xca11bac;
// Constants relevant to double precision floating point numbers.
// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
-const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
-
+constexpr uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
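Worked out for orientation only: 0xfff is twelve set bits, and shifting by 51 - 32 = 19 places them at bits 19..30 of the upper word, i.e. the 11 exponent bits plus the quiet-NaN bit of an IEEE-754 double viewed through its high 32 bits.

  // Hypothetical standalone check of the resulting constant.
  static_assert(kQuietNaNHighBitsMask == 0x7ff80000u,
                "twelve bits starting at bit 19 of the high word");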
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
@@ -542,8 +532,7 @@ enum AllocationSpace {
FIRST_PAGED_SPACE = OLD_SPACE,
LAST_PAGED_SPACE = MAP_SPACE
};
-const int kSpaceTagSize = 3;
-const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
+constexpr int kSpaceTagSize = 3;
enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
@@ -628,7 +617,7 @@ enum VisitMode {
VISIT_ALL_IN_SCAVENGE,
VISIT_ALL_IN_SWEEP_NEWSPACE,
VISIT_ONLY_STRONG,
- VISIT_ONLY_STRONG_FOR_SERIALIZATION,
+ VISIT_FOR_SERIALIZATION,
};
// Flag indicating whether code is built into the VM (one of the natives files).
@@ -750,12 +739,12 @@ union IeeeDoubleBigEndianArchType {
#if V8_TARGET_LITTLE_ENDIAN
typedef IeeeDoubleLittleEndianArchType IeeeDoubleArchType;
-const int kIeeeDoubleMantissaWordOffset = 0;
-const int kIeeeDoubleExponentWordOffset = 4;
+constexpr int kIeeeDoubleMantissaWordOffset = 0;
+constexpr int kIeeeDoubleExponentWordOffset = 4;
#else
typedef IeeeDoubleBigEndianArchType IeeeDoubleArchType;
-const int kIeeeDoubleMantissaWordOffset = 4;
-const int kIeeeDoubleExponentWordOffset = 0;
+constexpr int kIeeeDoubleMantissaWordOffset = 4;
+constexpr int kIeeeDoubleExponentWordOffset = 0;
#endif
// AccessorCallback
@@ -930,20 +919,18 @@ enum AllocationSiteMode {
(!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR))) || \
(V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6) && \
(!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR)))
-const uint32_t kHoleNanUpper32 = 0xFFFF7FFF;
-const uint32_t kHoleNanLower32 = 0xFFFF7FFF;
+constexpr uint32_t kHoleNanUpper32 = 0xFFFF7FFF;
+constexpr uint32_t kHoleNanLower32 = 0xFFFF7FFF;
#else
-const uint32_t kHoleNanUpper32 = 0xFFF7FFFF;
-const uint32_t kHoleNanLower32 = 0xFFF7FFFF;
+constexpr uint32_t kHoleNanUpper32 = 0xFFF7FFFF;
+constexpr uint32_t kHoleNanLower32 = 0xFFF7FFFF;
#endif
-const uint64_t kHoleNanInt64 =
+constexpr uint64_t kHoleNanInt64 =
(static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
-
// ES6 section 20.1.2.6 Number.MAX_SAFE_INTEGER
-const double kMaxSafeInteger = 9007199254740991.0; // 2^53-1
-
+constexpr double kMaxSafeInteger = 9007199254740991.0; // 2^53-1
// The order of this enum has to be kept in sync with the predicates below.
enum VariableMode : uint8_t {
@@ -1094,7 +1081,6 @@ enum FunctionKind : uint16_t {
kArrowFunction = 1 << 0,
kGeneratorFunction = 1 << 1,
kConciseMethod = 1 << 2,
- kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod,
kDefaultConstructor = 1 << 3,
kDerivedConstructor = 1 << 4,
kBaseConstructor = 1 << 5,
@@ -1102,6 +1088,10 @@ enum FunctionKind : uint16_t {
kSetterFunction = 1 << 7,
kAsyncFunction = 1 << 8,
kModule = 1 << 9,
+ kClassFieldsInitializerFunction = 1 << 10 | kConciseMethod,
+ kLastFunctionKind = kClassFieldsInitializerFunction,
+
+ kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod,
kAccessorFunction = kGetterFunction | kSetterFunction,
kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
kDefaultDerivedConstructor = kDefaultConstructor | kDerivedConstructor,
@@ -1133,7 +1123,8 @@ inline bool IsValidFunctionKind(FunctionKind kind) {
kind == FunctionKind::kAsyncArrowFunction ||
kind == FunctionKind::kAsyncConciseMethod ||
kind == FunctionKind::kAsyncConciseGeneratorMethod ||
- kind == FunctionKind::kAsyncGeneratorFunction;
+ kind == FunctionKind::kAsyncGeneratorFunction ||
+ kind == FunctionKind::kClassFieldsInitializerFunction;
}
@@ -1211,6 +1202,11 @@ inline bool IsClassConstructor(FunctionKind kind) {
return (kind & FunctionKind::kClassConstructor) != 0;
}
+inline bool IsClassFieldsInitializerFunction(FunctionKind kind) {
+ DCHECK(IsValidFunctionKind(kind));
+ return kind == FunctionKind::kClassFieldsInitializerFunction;
+}
+
inline bool IsConstructable(FunctionKind kind) {
if (IsAccessorFunction(kind)) return false;
if (IsConciseMethod(kind)) return false;
@@ -1253,14 +1249,17 @@ inline uint32_t ObjectHash(Address address) {
// Type feedback is encoded in such a way that, we can combine the feedback
// at different points by performing an 'OR' operation. Type feedback moves
// to a more generic type when we combine feedback.
-// kSignedSmall -> kSignedSmallInputs -> kNumber -> kNumberOrOddball -> kAny
-// kString -> kAny
-// kBigInt -> kAny
-// TODO(mythria): Remove kNumber type when crankshaft can handle Oddballs
-// similar to Numbers. We don't need kNumber feedback for Turbofan. Extra
-// information about Number might reduce few instructions but causes more
-// deopts. We collect Number only because crankshaft does not handle all
-// cases of oddballs.
+//
+// kSignedSmall -> kSignedSmallInputs -> kNumber -> kNumberOrOddball -> kAny
+// kString -> kAny
+// kBigInt -> kAny
+//
+// Technically we wouldn't need the separation between the kNumber and the
+// kNumberOrOddball values here, since for binary operations we always
+// truncate oddballs to numbers. In practice, though, always handling numbers
+// and oddballs everywhere causes TurboFan to generate quite a lot of unused
+// code, since in 99% of the use sites only numbers show up.
class BinaryOperationFeedback {
public:
enum {
@@ -1278,11 +1277,15 @@ class BinaryOperationFeedback {
// Type feedback is encoded in such a way that, we can combine the feedback
// at different points by performing an 'OR' operation. Type feedback moves
// to a more generic type when we combine feedback.
-// kSignedSmall -> kNumber -> kAny
-// kInternalizedString -> kString -> kAny
-// kSymbol -> kAny
-// kReceiver -> kAny
-// TODO(epertoso): consider unifying this with BinaryOperationFeedback.
+//
+// kSignedSmall -> kNumber -> kNumberOrOddball -> kAny
+// kInternalizedString -> kString -> kAny
+// kSymbol -> kAny
+// kBigInt -> kAny
+// kReceiver -> kAny
+//
+// This is kept distinct from BinaryOperationFeedback on purpose, because both
+// the feedback that matters and the way it is consumed differ greatly.
class CompareOperationFeedback {
public:
enum {
@@ -1293,6 +1296,7 @@ class CompareOperationFeedback {
kInternalizedString = 0x8,
kString = 0x18,
kSymbol = 0x20,
+ kBigInt = 0x30,
kReceiver = 0x40,
kAny = 0xff
};
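A small sketch (illustrative only, using the enum values shown above) of why OR-combining works: each more generic state is a bit-superset of the states it subsumes, so merging observations is a single OR, and a consumer can test whether the combined feedback still fits a type with a subset check.

  int feedback = CompareOperationFeedback::kInternalizedString;  // 0x08
  feedback |= CompareOperationFeedback::kString;                 // still 0x18
  bool only_strings =
      (feedback & ~CompareOperationFeedback::kString) == 0;      // true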
@@ -1419,6 +1423,7 @@ inline std::ostream& operator<<(std::ostream& os,
}
enum class OptimizationMarker {
+ kLogFirstExecution,
kNone,
kCompileOptimized,
kCompileOptimizedConcurrent,
@@ -1428,6 +1433,8 @@ enum class OptimizationMarker {
inline std::ostream& operator<<(std::ostream& os,
const OptimizationMarker& marker) {
switch (marker) {
+ case OptimizationMarker::kLogFirstExecution:
+ return os << "OptimizationMarker::kLogFirstExecution";
case OptimizationMarker::kNone:
return os << "OptimizationMarker::kNone";
case OptimizationMarker::kCompileOptimized:
@@ -1441,6 +1448,20 @@ inline std::ostream& operator<<(std::ostream& os,
return os;
}
+enum class SpeculationMode { kAllowSpeculation, kDisallowSpeculation };
+
+inline std::ostream& operator<<(std::ostream& os,
+ SpeculationMode speculation_mode) {
+ switch (speculation_mode) {
+ case SpeculationMode::kAllowSpeculation:
+ return os << "SpeculationMode::kAllowSpeculation";
+ case SpeculationMode::kDisallowSpeculation:
+ return os << "SpeculationMode::kDisallowSpeculation";
+ }
+ UNREACHABLE();
+ return os;
+}
+
enum class ConcurrencyMode { kNotConcurrent, kConcurrent };
#define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \
@@ -1455,7 +1476,9 @@ enum class ConcurrencyMode { kNotConcurrent, kConcurrent };
C(PendingHandlerFP, pending_handler_fp) \
C(PendingHandlerSP, pending_handler_sp) \
C(ExternalCaughtException, external_caught_exception) \
- C(JSEntrySP, js_entry_sp)
+ C(JSEntrySP, js_entry_sp) \
+ C(MicrotaskQueueBailoutIndex, microtask_queue_bailout_index) \
+ C(MicrotaskQueueBailoutCount, microtask_queue_bailout_count)
enum IsolateAddressId {
#define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 7f403bcdb5..ef4d4b155a 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -110,7 +110,7 @@ void HandleScope::DeleteExtensions(Isolate* isolate) {
void HandleScope::ZapRange(Object** start, Object** end) {
DCHECK_LE(end - start, kHandleBlockSize);
for (Object** p = start; p != end; p++) {
- *reinterpret_cast<Address*>(p) = kHandleZapValue;
+ *reinterpret_cast<Address*>(p) = reinterpret_cast<Address>(kHandleZapValue);
}
}
#endif
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index 62dc9007ad..e747ba2720 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -146,6 +146,7 @@
V(promise_string, "promise") \
V(proto_string, "__proto__") \
V(prototype_string, "prototype") \
+ V(proxy_string, "proxy") \
V(Proxy_string, "Proxy") \
V(query_colon_string, "(?:)") \
V(RangeError_string, "RangeError") \
@@ -155,6 +156,7 @@
V(reject_string, "reject") \
V(resolve_string, "resolve") \
V(return_string, "return") \
+ V(revoke_string, "revoke") \
V(script_string, "script") \
V(second_string, "second") \
V(setPrototypeOf_string, "setPrototypeOf") \
@@ -356,4 +358,17 @@
F(SCAVENGER_SCAVENGE_ROOTS) \
F(SCAVENGER_SCAVENGE_WEAK)
+#define TRACER_BACKGROUND_SCOPES(F) \
+ F(BACKGROUND_ARRAY_BUFFER_FREE) \
+ F(BACKGROUND_STORE_BUFFER) \
+ F(BACKGROUND_UNMAPPER) \
+ F(MC_BACKGROUND_EVACUATE_COPY) \
+ F(MC_BACKGROUND_EVACUATE_UPDATE_POINTERS) \
+ F(MC_BACKGROUND_MARKING) \
+ F(MC_BACKGROUND_SWEEPING) \
+ F(MINOR_MC_BACKGROUND_EVACUATE_COPY) \
+ F(MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS) \
+ F(MINOR_MC_BACKGROUND_MARKING) \
+ F(SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL)
+
#endif // V8_HEAP_SYMBOLS_H_
diff --git a/deps/v8/src/heap/array-buffer-collector.cc b/deps/v8/src/heap/array-buffer-collector.cc
index 1f41ffb2eb..cf0297bb2a 100644
--- a/deps/v8/src/heap/array-buffer-collector.cc
+++ b/deps/v8/src/heap/array-buffer-collector.cc
@@ -36,6 +36,9 @@ class ArrayBufferCollector::FreeingTask final : public CancelableTask {
private:
void RunInternal() final {
+ TRACE_BACKGROUND_GC(
+ heap_->tracer(),
+ GCTracer::BackgroundScope::BACKGROUND_ARRAY_BUFFER_FREE);
heap_->array_buffer_collector()->FreeAllocations();
}
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 9634db951a..44ab099ba8 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -7,6 +7,7 @@
#include <stack>
#include <unordered_map>
+#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact-inl.h"
@@ -186,9 +187,14 @@ class ConcurrentMarkingVisitor final
// ===========================================================================
int VisitFixedArray(Map* map, FixedArray* object) {
- int length = object->synchronized_length();
- int size = FixedArray::SizeFor(length);
+ // The synchronized_length() function checks that the length is a Smi.
+ // This is not necessarily the case if the array is being left-trimmed.
+ Object* length = object->unchecked_synchronized_length();
if (!ShouldVisit(object)) return 0;
+ // The cached length must be the actual length as the array is not black.
+ // Left trimming marks the array black before over-writing the length.
+ DCHECK(length->IsSmi());
+ int size = FixedArray::SizeFor(Smi::ToInt(length));
VisitMapPointer(object, object->map_slot());
FixedArray::BodyDescriptor::IterateBody(object, size, this);
return size;
@@ -381,6 +387,12 @@ SeqTwoByteString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
return reinterpret_cast<SeqTwoByteString*>(object);
}
+// Fixed array can become a free space during left trimming.
+template <>
+FixedArray* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
+ return reinterpret_cast<FixedArray*>(object);
+}
+
class ConcurrentMarking::Task : public CancelableTask {
public:
Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
@@ -427,6 +439,8 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
}
void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
+ TRACE_BACKGROUND_GC(heap_->tracer(),
+ GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
LiveBytesMap* live_bytes = nullptr;
@@ -500,6 +514,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
void ConcurrentMarking::ScheduleTasks() {
+ DCHECK(heap_->use_tasks());
if (!FLAG_concurrent_marking) return;
base::LockGuard<base::Mutex> guard(&pending_lock_);
if (task_count_ == 0) {
@@ -528,7 +543,7 @@ void ConcurrentMarking::ScheduleTasks() {
}
void ConcurrentMarking::RescheduleTasksIfNeeded() {
- if (!FLAG_concurrent_marking) return;
+ if (!FLAG_concurrent_marking || !heap_->use_tasks()) return;
{
base::LockGuard<base::Mutex> guard(&pending_lock_);
if (pending_task_count_ > 0) return;
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index b9832d5433..30abe44ca6 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -6,6 +6,7 @@
#include <cstdarg>
+#include "src/base/atomic-utils.h"
#include "src/counters.h"
#include "src/heap/heap-inl.h"
#include "src/isolate.h"
@@ -15,8 +16,8 @@ namespace internal {
static size_t CountTotalHolesSize(Heap* heap) {
size_t holes_size = 0;
- OldSpaces spaces(heap);
- for (OldSpace* space = spaces.next(); space != nullptr;
+ PagedSpaces spaces(heap);
+ for (PagedSpace* space = spaces.next(); space != nullptr;
space = spaces.next()) {
DCHECK_GE(holes_size + space->Waste() + space->Available(), holes_size);
holes_size += space->Waste() + space->Available();
@@ -24,9 +25,11 @@ static size_t CountTotalHolesSize(Heap* heap) {
return holes_size;
}
-RuntimeCallStats::CounterId GCTracer::RCSCounterFromScope(Scope::ScopeId id) {
- return RuntimeCallStats::counters[kFirstGCIndexInRuntimeCallStats +
- static_cast<int>(id)];
+RuntimeCallCounterId GCTracer::RCSCounterFromScope(Scope::ScopeId id) {
+ STATIC_ASSERT(Scope::FIRST_SCOPE == Scope::MC_INCREMENTAL);
+ return static_cast<RuntimeCallCounterId>(
+ static_cast<int>(RuntimeCallCounterId::kGC_MC_INCREMENTAL) +
+ static_cast<int>(id));
}
GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
@@ -35,8 +38,7 @@ GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
// TODO(cbruni): remove once we fully moved to a trace-based system.
if (V8_LIKELY(!FLAG_runtime_stats)) return;
runtime_stats_ = tracer_->heap_->isolate()->counters()->runtime_call_stats();
- RuntimeCallStats::Enter(runtime_stats_, &timer_,
- GCTracer::RCSCounterFromScope(scope));
+ runtime_stats_->Enter(&timer_, GCTracer::RCSCounterFromScope(scope));
}
GCTracer::Scope::~Scope() {
@@ -44,7 +46,28 @@ GCTracer::Scope::~Scope() {
scope_, tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_);
// TODO(cbruni): remove once we fully moved to a trace-based system.
if (V8_LIKELY(runtime_stats_ == nullptr)) return;
- RuntimeCallStats::Leave(runtime_stats_, &timer_);
+ runtime_stats_->Leave(&timer_);
+}
+
+GCTracer::BackgroundScope::BackgroundScope(GCTracer* tracer, ScopeId scope)
+ : tracer_(tracer), scope_(scope), runtime_stats_enabled_(false) {
+ start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
+ // TODO(cbruni): remove once we fully moved to a trace-based system.
+ if (V8_LIKELY(!base::AsAtomic32::Relaxed_Load(&FLAG_runtime_stats))) return;
+ timer_.Start(&counter_, nullptr);
+ runtime_stats_enabled_ = true;
+}
+
+GCTracer::BackgroundScope::~BackgroundScope() {
+ double duration_ms =
+ tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_;
+ // TODO(cbruni): remove once we fully moved to a trace-based system.
+ if (V8_LIKELY(!runtime_stats_enabled_)) {
+ tracer_->AddBackgroundScopeSample(scope_, duration_ms, nullptr);
+ } else {
+ timer_.Stop();
+ tracer_->AddBackgroundScopeSample(scope_, duration_ms, &counter_);
+ }
}
const char* GCTracer::Scope::Name(ScopeId id) {
@@ -53,11 +76,27 @@ const char* GCTracer::Scope::Name(ScopeId id) {
return "V8.GC_" #scope;
switch (id) {
TRACER_SCOPES(CASE)
+ TRACER_BACKGROUND_SCOPES(CASE)
case Scope::NUMBER_OF_SCOPES:
break;
}
#undef CASE
- return "(unknown)";
+ UNREACHABLE();
+ return nullptr;
+}
+
+const char* GCTracer::BackgroundScope::Name(ScopeId id) {
+#define CASE(scope) \
+ case BackgroundScope::scope: \
+ return "V8.GC_" #scope;
+ switch (id) {
+ TRACER_BACKGROUND_SCOPES(CASE)
+ case BackgroundScope::NUMBER_OF_SCOPES:
+ break;
+ }
+#undef CASE
+ UNREACHABLE();
+ return nullptr;
}
GCTracer::Event::Event(Type type, GarbageCollectionReason gc_reason,
@@ -120,9 +159,11 @@ GCTracer::GCTracer(Heap* heap)
// We assume that MC_INCREMENTAL is the first scope so that we can properly
// map it to RuntimeCallStats.
STATIC_ASSERT(0 == Scope::MC_INCREMENTAL);
- CHECK(&RuntimeCallStats::GC_MC_INCREMENTAL ==
- RuntimeCallStats::counters[GCTracer::kFirstGCIndexInRuntimeCallStats]);
current_.end_time = heap_->MonotonicallyIncreasingTimeInMs();
+ for (int i = 0; i < BackgroundScope::NUMBER_OF_SCOPES; i++) {
+ background_counter_[i].total_duration_ms = 0;
+ background_counter_[i].runtime_call_counter = RuntimeCallCounter(nullptr);
+ }
}
void GCTracer::ResetForTesting() {
@@ -147,6 +188,11 @@ void GCTracer::ResetForTesting() {
recorded_context_disposal_times_.Reset();
recorded_survival_ratios_.Reset();
start_counter_ = 0;
+ base::LockGuard<base::Mutex> guard(&background_counter_mutex_);
+ for (int i = 0; i < BackgroundScope::NUMBER_OF_SCOPES; i++) {
+ background_counter_[i].total_duration_ms = 0;
+ background_counter_[i].runtime_call_counter.Reset();
+ }
}
void GCTracer::NotifyYoungGenerationHandling(
@@ -267,6 +313,7 @@ void GCTracer::Stop(GarbageCollector collector) {
MakeBytesAndDuration(current_.new_space_object_size, duration));
recorded_minor_gcs_survived_.Push(MakeBytesAndDuration(
current_.survived_new_space_object_size, duration));
+ FetchBackgroundMinorGCCounters();
break;
case Event::INCREMENTAL_MARK_COMPACTOR:
current_.incremental_marking_bytes = incremental_marking_bytes_;
@@ -281,6 +328,7 @@ void GCTracer::Stop(GarbageCollector collector) {
MakeBytesAndDuration(current_.start_object_size, duration));
ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
+ FetchBackgroundMarkCompactCounters();
break;
case Event::MARK_COMPACTOR:
DCHECK_EQ(0u, current_.incremental_marking_bytes);
@@ -289,10 +337,12 @@ void GCTracer::Stop(GarbageCollector collector) {
MakeBytesAndDuration(current_.start_object_size, duration));
ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
+ FetchBackgroundMarkCompactCounters();
break;
case Event::START:
UNREACHABLE();
}
+ FetchBackgroundGeneralCounters();
heap_->UpdateTotalGCTime(duration);
@@ -467,6 +517,10 @@ void GCTracer::PrintNVP() const {
"scavenge.weak_global_handles.identify=%.2f "
"scavenge.weak_global_handles.process=%.2f "
"scavenge.parallel=%.2f "
+ "background.scavenge.parallel=%.2f "
+ "background.array_buffer_free=%.2f "
+ "background.store_buffer=%.2f "
+ "background.unmapper=%.2f "
"incremental.steps_count=%d "
"incremental.steps_took=%.1f "
"scavenge_throughput=%.f "
@@ -493,7 +547,6 @@ void GCTracer::PrintNVP() const {
"semi_space_copy_rate=%.1f%% "
"new_space_allocation_throughput=%.1f "
"unmapper_chunks=%d "
- "unmapper_delayed_chunks=%d "
"context_disposal_rate=%.1f\n",
duration, spent_in_mutator, current_.TypeName(true),
current_.reduce_memory, current_.scopes[Scope::HEAP_PROLOGUE],
@@ -512,6 +565,10 @@ void GCTracer::PrintNVP() const {
current_
.scopes[Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS],
current_.scopes[Scope::SCAVENGER_SCAVENGE_PARALLEL],
+ current_.scopes[Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL],
+ current_.scopes[Scope::BACKGROUND_ARRAY_BUFFER_FREE],
+ current_.scopes[Scope::BACKGROUND_STORE_BUFFER],
+ current_.scopes[Scope::BACKGROUND_UNMAPPER],
current_.incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL]
.steps,
current_.scopes[Scope::MC_INCREMENTAL],
@@ -526,7 +583,6 @@ void GCTracer::PrintNVP() const {
heap_->semi_space_copied_rate_,
NewSpaceAllocationThroughputInBytesPerMillisecond(),
heap_->memory_allocator()->unmapper()->NumberOfChunks(),
- heap_->memory_allocator()->unmapper()->NumberOfDelayedChunks(),
ContextDisposalRateInMilliseconds());
break;
case Event::MINOR_MARK_COMPACTOR:
@@ -550,6 +606,12 @@ void GCTracer::PrintNVP() const {
"evacuate.update_pointers=%.2f "
"evacuate.update_pointers.to_new_roots=%.2f "
"evacuate.update_pointers.slots=%.2f "
+ "background.mark=%.2f "
+ "background.evacuate.copy=%.2f "
+ "background.evacuate.update_pointers=%.2f "
+ "background.array_buffer_free=%.2f "
+ "background.store_buffer=%.2f "
+ "background.unmapper=%.2f "
"update_marking_deque=%.2f "
"reset_liveness=%.2f\n",
duration, spent_in_mutator, "mmc", current_.reduce_memory,
@@ -569,6 +631,12 @@ void GCTracer::PrintNVP() const {
current_
.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS],
current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS],
+ current_.scopes[Scope::MINOR_MC_BACKGROUND_MARKING],
+ current_.scopes[Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY],
+ current_.scopes[Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS],
+ current_.scopes[Scope::BACKGROUND_ARRAY_BUFFER_FREE],
+ current_.scopes[Scope::BACKGROUND_STORE_BUFFER],
+ current_.scopes[Scope::BACKGROUND_UNMAPPER],
current_.scopes[Scope::MINOR_MC_MARKING_DEQUE],
current_.scopes[Scope::MINOR_MC_RESET_LIVENESS]);
break;
@@ -640,6 +708,13 @@ void GCTracer::PrintNVP() const {
"incremental_steps_count=%d "
"incremental_marking_throughput=%.f "
"incremental_walltime_duration=%.f "
+ "background.mark=%.1f "
+ "background.sweep=%.1f "
+ "background.evacuate.copy=%.1f "
+ "background.evacuate.update_pointers=%.1f "
+ "background.array_buffer_free=%.2f "
+ "background.store_buffer=%.2f "
+ "background.unmapper=%.1f "
"total_size_before=%" PRIuS
" "
"total_size_after=%" PRIuS
@@ -663,7 +738,6 @@ void GCTracer::PrintNVP() const {
"semi_space_copy_rate=%.1f%% "
"new_space_allocation_throughput=%.1f "
"unmapper_chunks=%d "
- "unmapper_delayed_chunks=%d "
"context_disposal_rate=%.1f "
"compaction_speed=%.f\n",
duration, spent_in_mutator, current_.TypeName(true),
@@ -732,10 +806,17 @@ void GCTracer::PrintNVP() const {
.longest_step,
current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].steps,
IncrementalMarkingSpeedInBytesPerMillisecond(),
- incremental_walltime_duration, current_.start_object_size,
- current_.end_object_size, current_.start_holes_size,
- current_.end_holes_size, allocated_since_last_gc,
- heap_->promoted_objects_size(),
+ incremental_walltime_duration,
+ current_.scopes[Scope::MC_BACKGROUND_MARKING],
+ current_.scopes[Scope::MC_BACKGROUND_SWEEPING],
+ current_.scopes[Scope::MC_BACKGROUND_EVACUATE_COPY],
+ current_.scopes[Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS],
+ current_.scopes[Scope::BACKGROUND_ARRAY_BUFFER_FREE],
+ current_.scopes[Scope::BACKGROUND_STORE_BUFFER],
+ current_.scopes[Scope::BACKGROUND_UNMAPPER],
+ current_.start_object_size, current_.end_object_size,
+ current_.start_holes_size, current_.end_holes_size,
+ allocated_since_last_gc, heap_->promoted_objects_size(),
heap_->semi_space_copied_object_size(),
heap_->nodes_died_in_new_space_, heap_->nodes_copied_in_new_space_,
heap_->nodes_promoted_, heap_->promotion_ratio_,
@@ -743,7 +824,6 @@ void GCTracer::PrintNVP() const {
heap_->semi_space_copied_rate_,
NewSpaceAllocationThroughputInBytesPerMillisecond(),
heap_->memory_allocator()->unmapper()->NumberOfChunks(),
- heap_->memory_allocator()->unmapper()->NumberOfDelayedChunks(),
ContextDisposalRateInMilliseconds(),
CompactionSpeedInBytesPerMillisecond());
break;
@@ -899,5 +979,72 @@ void GCTracer::NotifyIncrementalMarkingStart() {
incremental_marking_start_time_ = heap_->MonotonicallyIncreasingTimeInMs();
}
+void GCTracer::FetchBackgroundMarkCompactCounters() {
+ FetchBackgroundCounters(Scope::FIRST_MC_BACKGROUND_SCOPE,
+ Scope::LAST_MC_BACKGROUND_SCOPE,
+ BackgroundScope::FIRST_MC_BACKGROUND_SCOPE,
+ BackgroundScope::LAST_MC_BACKGROUND_SCOPE);
+ heap_->isolate()->counters()->background_marking()->AddSample(
+ static_cast<int>(current_.scopes[Scope::MC_BACKGROUND_MARKING]));
+ heap_->isolate()->counters()->background_sweeping()->AddSample(
+ static_cast<int>(current_.scopes[Scope::MC_BACKGROUND_SWEEPING]));
+}
+
+void GCTracer::FetchBackgroundMinorGCCounters() {
+ FetchBackgroundCounters(Scope::FIRST_MINOR_GC_BACKGROUND_SCOPE,
+ Scope::LAST_MINOR_GC_BACKGROUND_SCOPE,
+ BackgroundScope::FIRST_MINOR_GC_BACKGROUND_SCOPE,
+ BackgroundScope::LAST_MINOR_GC_BACKGROUND_SCOPE);
+ heap_->isolate()->counters()->background_scavenger()->AddSample(
+ static_cast<int>(
+ current_.scopes[Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL]));
+}
+
+void GCTracer::FetchBackgroundGeneralCounters() {
+ FetchBackgroundCounters(Scope::FIRST_GENERAL_BACKGROUND_SCOPE,
+ Scope::LAST_GENERAL_BACKGROUND_SCOPE,
+ BackgroundScope::FIRST_GENERAL_BACKGROUND_SCOPE,
+ BackgroundScope::LAST_GENERAL_BACKGROUND_SCOPE);
+}
+
+void GCTracer::FetchBackgroundCounters(int first_global_scope,
+ int last_global_scope,
+ int first_background_scope,
+ int last_background_scope) {
+ DCHECK_EQ(last_global_scope - first_global_scope,
+ last_background_scope - first_background_scope);
+ base::LockGuard<base::Mutex> guard(&background_counter_mutex_);
+ int background_mc_scopes = last_background_scope - first_background_scope + 1;
+ for (int i = 0; i < background_mc_scopes; i++) {
+ current_.scopes[first_global_scope + i] +=
+ background_counter_[first_background_scope + i].total_duration_ms;
+ background_counter_[first_background_scope + i].total_duration_ms = 0;
+ }
+ if (V8_LIKELY(!FLAG_runtime_stats)) return;
+ RuntimeCallStats* runtime_stats =
+ heap_->isolate()->counters()->runtime_call_stats();
+ if (!runtime_stats) return;
+ for (int i = 0; i < background_mc_scopes; i++) {
+ runtime_stats
+ ->GetCounter(GCTracer::RCSCounterFromScope(
+ static_cast<Scope::ScopeId>(first_global_scope + i)))
+ ->Add(&background_counter_[first_background_scope + i]
+ .runtime_call_counter);
+ background_counter_[first_background_scope + i]
+ .runtime_call_counter.Reset();
+ }
+}
+
+void GCTracer::AddBackgroundScopeSample(
+ BackgroundScope::ScopeId scope, double duration,
+ RuntimeCallCounter* runtime_call_counter) {
+ base::LockGuard<base::Mutex> guard(&background_counter_mutex_);
+ BackgroundCounter& counter = background_counter_[scope];
+ counter.total_duration_ms += duration;
+ if (runtime_call_counter) {
+ counter.runtime_call_counter.Add(runtime_call_counter);
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 2a0c47692e..fb0f001e3d 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -31,6 +31,11 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), \
GCTracer::Scope::Name(gc_tracer_scope_id))
+#define TRACE_BACKGROUND_GC(tracer, scope_id) \
+ GCTracer::BackgroundScope background_scope(tracer, scope_id); \
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), \
+ GCTracer::BackgroundScope::Name(scope_id))
+
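For reference, the usage pattern (as the array-buffer, marking and scavenging tasks elsewhere in this change do) is simply to open the scope at the top of the task body; BACKGROUND_UNMAPPER below is just one of the scope ids declared in this patch:

  // scope_id selects the BackgroundScope bucket that the elapsed time and the
  // disabled-by-default "v8.gc" trace event are attributed to.
  void RunInternal() final {
    TRACE_BACKGROUND_GC(heap_->tracer(),
                        GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
    // ... background work ...
  }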
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
class V8_EXPORT_PRIVATE GCTracer {
@@ -61,7 +66,7 @@ class V8_EXPORT_PRIVATE GCTracer {
public:
enum ScopeId {
#define DEFINE_SCOPE(scope) scope,
- TRACER_SCOPES(DEFINE_SCOPE)
+ TRACER_SCOPES(DEFINE_SCOPE) TRACER_BACKGROUND_SCOPES(DEFINE_SCOPE)
#undef DEFINE_SCOPE
NUMBER_OF_SCOPES,
@@ -69,7 +74,13 @@ class V8_EXPORT_PRIVATE GCTracer {
LAST_INCREMENTAL_SCOPE = MC_INCREMENTAL_EXTERNAL_PROLOGUE,
FIRST_SCOPE = MC_INCREMENTAL,
NUMBER_OF_INCREMENTAL_SCOPES =
- LAST_INCREMENTAL_SCOPE - FIRST_INCREMENTAL_SCOPE + 1
+ LAST_INCREMENTAL_SCOPE - FIRST_INCREMENTAL_SCOPE + 1,
+ FIRST_GENERAL_BACKGROUND_SCOPE = BACKGROUND_ARRAY_BUFFER_FREE,
+ LAST_GENERAL_BACKGROUND_SCOPE = BACKGROUND_UNMAPPER,
+ FIRST_MC_BACKGROUND_SCOPE = MC_BACKGROUND_EVACUATE_COPY,
+ LAST_MC_BACKGROUND_SCOPE = MC_BACKGROUND_SWEEPING,
+ FIRST_MINOR_GC_BACKGROUND_SCOPE = MINOR_MC_BACKGROUND_EVACUATE_COPY,
+ LAST_MINOR_GC_BACKGROUND_SCOPE = SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL
};
Scope(GCTracer* tracer, ScopeId scope);
@@ -86,6 +97,34 @@ class V8_EXPORT_PRIVATE GCTracer {
DISALLOW_COPY_AND_ASSIGN(Scope);
};
+ class V8_EXPORT_PRIVATE BackgroundScope {
+ public:
+ enum ScopeId {
+#define DEFINE_SCOPE(scope) scope,
+ TRACER_BACKGROUND_SCOPES(DEFINE_SCOPE)
+#undef DEFINE_SCOPE
+ NUMBER_OF_SCOPES,
+ FIRST_GENERAL_BACKGROUND_SCOPE = BACKGROUND_ARRAY_BUFFER_FREE,
+ LAST_GENERAL_BACKGROUND_SCOPE = BACKGROUND_UNMAPPER,
+ FIRST_MC_BACKGROUND_SCOPE = MC_BACKGROUND_EVACUATE_COPY,
+ LAST_MC_BACKGROUND_SCOPE = MC_BACKGROUND_SWEEPING,
+ FIRST_MINOR_GC_BACKGROUND_SCOPE = MINOR_MC_BACKGROUND_EVACUATE_COPY,
+ LAST_MINOR_GC_BACKGROUND_SCOPE = SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL
+ };
+ BackgroundScope(GCTracer* tracer, ScopeId scope);
+ ~BackgroundScope();
+
+ static const char* Name(ScopeId id);
+
+ private:
+ GCTracer* tracer_;
+ ScopeId scope_;
+ double start_time_;
+ RuntimeCallTimer timer_;
+ RuntimeCallCounter counter_;
+ bool runtime_stats_enabled_;
+ DISALLOW_COPY_AND_ASSIGN(BackgroundScope);
+ };
class Event {
public:
@@ -159,9 +198,8 @@ class V8_EXPORT_PRIVATE GCTracer {
};
static const int kThroughputTimeFrameMs = 5000;
- static const int kFirstGCIndexInRuntimeCallStats = 0;
- static RuntimeCallStats::CounterId RCSCounterFromScope(Scope::ScopeId id);
+ static RuntimeCallCounterId RCSCounterFromScope(Scope::ScopeId id);
explicit GCTracer(Heap* heap);
@@ -273,9 +311,16 @@ class V8_EXPORT_PRIVATE GCTracer {
}
}
+ void AddBackgroundScopeSample(BackgroundScope::ScopeId scope, double duration,
+ RuntimeCallCounter* runtime_call_counter);
+
private:
FRIEND_TEST(GCTracer, AverageSpeed);
FRIEND_TEST(GCTracerTest, AllocationThroughput);
+ FRIEND_TEST(GCTracerTest, BackgroundScavengerScope);
+ FRIEND_TEST(GCTracerTest, BackgroundMinorMCScope);
+ FRIEND_TEST(GCTracerTest, BackgroundMajorMCScope);
+ FRIEND_TEST(GCTracerTest, MultithreadedBackgroundScope);
FRIEND_TEST(GCTracerTest, NewSpaceAllocationThroughput);
FRIEND_TEST(GCTracerTest, NewSpaceAllocationThroughputWithProvidedTime);
FRIEND_TEST(GCTracerTest, OldGenerationAllocationThroughputWithProvidedTime);
@@ -284,6 +329,11 @@ class V8_EXPORT_PRIVATE GCTracer {
FRIEND_TEST(GCTracerTest, IncrementalScope);
FRIEND_TEST(GCTracerTest, IncrementalMarkingSpeed);
+ struct BackgroundCounter {
+ double total_duration_ms;
+ RuntimeCallCounter runtime_call_counter;
+ };
+
// Returns the average speed of the events in the buffer.
// If the buffer is empty, the result is 0.
// Otherwise, the result is between 1 byte/ms and 1 GB/ms.
@@ -315,6 +365,13 @@ class V8_EXPORT_PRIVATE GCTracer {
current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE];
}
+ void FetchBackgroundCounters(int first_global_scope, int last_global_scope,
+ int first_background_scope,
+ int last_background_scope);
+ void FetchBackgroundMinorGCCounters();
+ void FetchBackgroundMarkCompactCounters();
+ void FetchBackgroundGeneralCounters();
+
// Pointer to the heap that owns this tracer.
Heap* heap_;
@@ -368,8 +425,12 @@ class V8_EXPORT_PRIVATE GCTracer {
base::RingBuffer<double> recorded_context_disposal_times_;
base::RingBuffer<double> recorded_survival_ratios_;
+ base::Mutex background_counter_mutex_;
+ BackgroundCounter background_counter_[BackgroundScope::NUMBER_OF_SCOPES];
+
DISALLOW_COPY_AND_ASSIGN(GCTracer);
};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index bf9eb2874f..f4e5c1fe13 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -52,6 +52,13 @@ ROOT_LIST(ROOT_ACCESSOR)
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR
+#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
+ Map* Heap::name##_map() { \
+ return Map::cast(roots_[k##Name##Size##MapRootIndex]); \
+ }
+DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
+#undef DATA_HANDLER_MAP_ACCESSOR
+
#define STRING_ACCESSOR(name, str) \
String* Heap::name() { return String::cast(roots_[k##name##RootIndex]); }
INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
@@ -243,11 +250,14 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(gc_state_ == NOT_IN_GC);
-#ifdef DEBUG
- if (FLAG_gc_interval >= 0 && !always_allocate() &&
- Heap::allocation_timeout_-- <= 0) {
- return AllocationResult::Retry(space);
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
+ if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
+ return AllocationResult::Retry(space);
+ }
}
+#endif
+#ifdef DEBUG
isolate_->counters()->objs_since_last_full()->Increment();
isolate_->counters()->objs_since_last_young()->Increment();
#endif
@@ -314,10 +324,10 @@ void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
PrintAllocationsHash();
}
- }
-
- if (FLAG_trace_allocation_stack_interval > 0) {
- if (!FLAG_verify_predictable) ++allocations_count_;
+ } else if (FLAG_fuzzer_gc_analysis) {
+ ++allocations_count_;
+ } else if (FLAG_trace_allocation_stack_interval > 0) {
+ ++allocations_count_;
if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
}
@@ -349,10 +359,11 @@ void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
PrintAllocationsHash();
}
+ } else if (FLAG_fuzzer_gc_analysis) {
+ ++allocations_count_;
}
}
-
void Heap::UpdateAllocationsHash(HeapObject* object) {
Address object_address = object->address();
MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 5aed117903..49c2eccb2d 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -43,9 +43,11 @@
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
+#include "src/heap/stress-marking-observer.h"
+#include "src/heap/stress-scavenge-observer.h"
#include "src/heap/sweeper.h"
#include "src/interpreter/interpreter.h"
-#include "src/objects/object-macros.h"
+#include "src/objects/data-handler.h"
#include "src/objects/shared-function-info.h"
#include "src/regexp/jsregexp.h"
#include "src/runtime-profiler.h"
@@ -60,6 +62,9 @@
#include "src/v8.h"
#include "src/vm-state-inl.h"
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
namespace v8 {
namespace internal {
@@ -89,14 +94,12 @@ void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
}
-void Heap::SetSerializedTemplates(FixedArray* templates) {
- DCHECK_EQ(empty_fixed_array(), serialized_templates());
+void Heap::SetSerializedObjects(FixedArray* objects) {
DCHECK(isolate()->serializer_enabled());
- set_serialized_templates(templates);
+ set_serialized_objects(objects);
}
void Heap::SetSerializedGlobalProxySizes(FixedArray* sizes) {
- DCHECK_EQ(empty_fixed_array(), serialized_global_proxy_sizes());
DCHECK(isolate()->serializer_enabled());
set_serialized_global_proxy_sizes(sizes);
}
@@ -172,13 +175,13 @@ Heap::Heap()
gc_post_processing_depth_(0),
allocations_count_(0),
raw_allocations_hash_(0),
+ stress_marking_observer_(nullptr),
+ stress_scavenge_observer_(nullptr),
+ max_marking_limit_reached_(0.0),
ms_count_(0),
gc_count_(0),
mmap_region_base_(0),
remembered_unmapped_pages_index_(0),
-#ifdef DEBUG
- allocation_timeout_(0),
-#endif // DEBUG
old_generation_allocation_limit_(initial_old_generation_size_),
inline_allocation_disabled_(false),
tracer_(nullptr),
@@ -227,7 +230,12 @@ Heap::Heap()
use_tasks_(true),
force_oom_(false),
delay_sweeper_tasks_for_testing_(false),
- pending_layout_change_object_(nullptr) {
+ pending_layout_change_object_(nullptr)
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ ,
+ allocation_timeout_(0)
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
+{
// Ensure old_generation_size_ is a multiple of kPageSize.
DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
@@ -296,9 +304,9 @@ size_t Heap::Available() {
if (!HasBeenSetUp()) return 0;
size_t total = 0;
- AllSpaces spaces(this);
- for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
- total += space->Available();
+
+ for (SpaceIterator it(this); it.has_next();) {
+ total += it.next()->Available();
}
return total;
}
@@ -356,30 +364,6 @@ void Heap::SetGCState(HeapState state) {
gc_state_ = state;
}
-// TODO(1238405): Combine the infrastructure for --heap-stats and
-// --log-gc to avoid the complicated preprocessor and flag testing.
-void Heap::ReportStatisticsBeforeGC() {
-// Heap::ReportHeapStatistics will also log NewSpace statistics when
-// compiled --log-gc is set. The following logic is used to avoid
-// double logging.
-#ifdef DEBUG
- if (FLAG_heap_stats || FLAG_log_gc) new_space_->CollectStatistics();
- if (FLAG_heap_stats) {
- ReportHeapStatistics("Before GC");
- } else if (FLAG_log_gc) {
- new_space_->ReportStatistics();
- }
- if (FLAG_heap_stats || FLAG_log_gc) new_space_->ClearHistograms();
-#else
- if (FLAG_log_gc) {
- new_space_->CollectStatistics();
- new_space_->ReportStatistics();
- new_space_->ClearHistograms();
- }
-#endif // DEBUG
-}
-
-
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
PrintIsolate(isolate_, "Memory allocator, used: %6" PRIuS
@@ -437,21 +421,7 @@ void Heap::PrintShortHeapStatistics() {
total_gc_time_ms_);
}
-// TODO(1238405): Combine the infrastructure for --heap-stats and
-// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
-// Similar to the before GC, we use some complicated logic to ensure that
-// NewSpace statistics are logged exactly once when --log-gc is turned on.
-#if defined(DEBUG)
- if (FLAG_heap_stats) {
- new_space_->CollectStatistics();
- ReportHeapStatistics("After GC");
- } else if (FLAG_log_gc) {
- new_space_->ReportStatistics();
- }
-#else
- if (FLAG_log_gc) new_space_->ReportStatistics();
-#endif // DEBUG
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
++i) {
int count = deferred_counters_[i];
@@ -630,8 +600,6 @@ void Heap::GarbageCollectionPrologue() {
DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
if (FLAG_gc_verbose) Print();
-
- ReportStatisticsBeforeGC();
#endif // DEBUG
if (new_space_->IsAtMaximumCapacity()) {
@@ -650,9 +618,9 @@ void Heap::GarbageCollectionPrologue() {
size_t Heap::SizeOfObjects() {
size_t total = 0;
- AllSpaces spaces(this);
- for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
- total += space->SizeOfObjects();
+
+ for (SpaceIterator it(this); it.has_next();) {
+ total += it.next()->SizeOfObjects();
}
return total;
}
@@ -712,6 +680,34 @@ void Heap::MergeAllocationSitePretenuringFeedback(
}
}
+void Heap::AddAllocationObserversToAllSpaces(
+ AllocationObserver* observer, AllocationObserver* new_space_observer) {
+ DCHECK(observer && new_space_observer);
+
+ for (SpaceIterator it(this); it.has_next();) {
+ Space* space = it.next();
+ if (space == new_space()) {
+ space->AddAllocationObserver(new_space_observer);
+ } else {
+ space->AddAllocationObserver(observer);
+ }
+ }
+}
+
+void Heap::RemoveAllocationObserversFromAllSpaces(
+ AllocationObserver* observer, AllocationObserver* new_space_observer) {
+ DCHECK(observer && new_space_observer);
+
+ for (SpaceIterator it(this); it.has_next();) {
+ Space* space = it.next();
+ if (space == new_space()) {
+ space->RemoveAllocationObserver(new_space_observer);
+ } else {
+ space->RemoveAllocationObserver(observer);
+ }
+ }
+}
+
class Heap::SkipStoreBufferScope {
public:
explicit SkipStoreBufferScope(StoreBuffer* store_buffer)
@@ -1027,7 +1023,10 @@ class GCCallbacksScope {
void Heap::HandleGCRequest() {
- if (HighMemoryPressure()) {
+ if (FLAG_stress_scavenge > 0 && stress_scavenge_observer_->HasRequestedGC()) {
+ CollectAllGarbage(NEW_SPACE, GarbageCollectionReason::kTesting);
+ stress_scavenge_observer_->RequestedGCDone();
+ } else if (HighMemoryPressure()) {
incremental_marking()->reset_request_type();
CheckMemoryPressure();
} else if (incremental_marking()->request_type() ==
@@ -1129,7 +1128,7 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
InvokeOutOfMemoryCallback();
}
RuntimeCallTimerScope runtime_timer(
- isolate(), &RuntimeCallStats::GC_Custom_AllAvailableGarbage);
+ isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
if (isolate()->concurrent_recompilation_enabled()) {
// The optimizing compiler may be unnecessarily holding on to memory.
DisallowHeapAllocation no_recursive_gc;
@@ -1217,13 +1216,14 @@ bool Heap::CollectGarbage(AllocationSpace space,
const char* collector_reason = nullptr;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
-#ifdef DEBUG
- // Reset the allocation timeout to the GC interval, but make sure to
- // allow at least a few allocations after a collection. The reason
- // for this is that we have a lot of allocation sequences and we
- // assume that a garbage collection will allow the subsequent
- // allocation attempts to go through.
- allocation_timeout_ = Max(6, FLAG_gc_interval);
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ // Reset the allocation timeout, but make sure to allow at least a few
+ // allocations after a collection. The reason for this is that we have a lot
+ // of allocation sequences and we assume that a garbage collection will allow
+ // the subsequent allocation attempts to go through.
+ if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
+ allocation_timeout_ = Max(6, NextAllocationTimeout(allocation_timeout_));
+ }
#endif
EnsureFillerObjectAtTop();
@@ -1680,8 +1680,8 @@ bool Heap::PerformGarbageCollection(
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
- RuntimeCallTimerScope runtime_timer(isolate(),
- &RuntimeCallStats::GCPrologueCallback);
+ RuntimeCallTimerScope runtime_timer(
+ isolate(), RuntimeCallCounterId::kGCPrologueCallback);
for (const GCCallbackTuple& info : gc_prologue_callbacks_) {
if (gc_type & info.gc_type) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
@@ -1691,8 +1691,8 @@ void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
}
void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags) {
- RuntimeCallTimerScope runtime_timer(isolate(),
- &RuntimeCallStats::GCEpilogueCallback);
+ RuntimeCallTimerScope runtime_timer(
+ isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
for (const GCCallbackTuple& info : gc_epilogue_callbacks_) {
if (gc_type & info.gc_type) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
@@ -1741,6 +1741,8 @@ void Heap::MinorMarkCompact() {
PauseAllocationObserversScope pause_observers(this);
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
+ CodeSpaceMemoryModificationScope code_modification(this);
+ ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
minor_mark_compact_collector()->CollectGarbage();
@@ -1826,7 +1828,7 @@ void Heap::EvacuateYoungGeneration() {
DCHECK(CanExpandOldGeneration(new_space()->Size()));
}
- mark_compact_collector()->sweeper()->EnsureNewSpaceCompleted();
+ mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
SetGCState(SCAVENGE);
LOG(isolate_, ResourceEvent("scavenge", "begin"));
@@ -1845,7 +1847,7 @@ void Heap::EvacuateYoungGeneration() {
if (!new_space()->Rebalance()) {
FatalProcessOutOfMemory("NewSpace::Rebalance");
}
- new_space()->ResetAllocationInfo();
+ new_space()->ResetLinearAllocationArea();
new_space()->set_age_mark(new_space()->top());
// Fix up special trackers.
@@ -1887,6 +1889,9 @@ class ScavengingTask final : public ItemParallelJob::Task {
barrier_(barrier) {}
void RunInParallel() final {
+ TRACE_BACKGROUND_GC(
+ heap_->tracer(),
+ GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
double scavenging_time = 0.0;
{
barrier_->Start();
@@ -1942,14 +1947,8 @@ void Heap::Scavenge() {
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
- if (mark_compact_collector()->sweeper()->sweeping_in_progress() &&
- memory_allocator_->unmapper()->NumberOfDelayedChunks() >
- static_cast<int>(new_space_->MaximumCapacity() / Page::kPageSize)) {
- mark_compact_collector()->EnsureSweepingCompleted();
- }
- // TODO(mlippautz): Untangle the dependency of the unmapper from the sweeper.
- mark_compact_collector()->sweeper()->EnsureNewSpaceCompleted();
+ mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
SetGCState(SCAVENGE);
@@ -1959,7 +1958,7 @@ void Heap::Scavenge() {
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
new_space_->Flip();
- new_space_->ResetAllocationInfo();
+ new_space_->ResetLinearAllocationArea();
ItemParallelJob job(isolate()->cancelable_task_manager(),
&parallel_scavenge_semaphore_);
@@ -2415,8 +2414,8 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
map->set_bit_field(0);
map->set_bit_field2(0);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
- Map::OwnsDescriptors::encode(true) |
- Map::ConstructionCounter::encode(Map::kNoSlackTracking);
+ Map::OwnsDescriptorsBit::encode(true) |
+ Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
map->set_bit_field3(bit_field3);
map->set_weak_cell_cache(Smi::kZero);
map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
@@ -2464,14 +2463,14 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
// |layout_descriptor| are set.
map->set_visitor_id(Map::GetVisitorId(map));
map->set_bit_field(0);
- map->set_bit_field2(1 << Map::kIsExtensible);
+ map->set_bit_field2(Map::IsExtensibleBit::kMask);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
- Map::OwnsDescriptors::encode(true) |
- Map::ConstructionCounter::encode(Map::kNoSlackTracking);
+ Map::OwnsDescriptorsBit::encode(true) |
+ Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
map->set_bit_field3(bit_field3);
map->set_elements_kind(elements_kind);
map->set_new_target_is_base(true);
-
+ if (FLAG_trace_maps) LOG(isolate(), MapCreate(map));
return map;
}
@@ -2608,6 +2607,10 @@ void Heap::CreateJSConstructEntryStub() {
set_js_construct_entry_code(*stub.GetCode());
}
+void Heap::CreateJSRunMicrotasksEntryStub() {
+ JSEntryStub stub(isolate(), JSEntryStub::SpecialTarget::kRunMicrotasks);
+ set_js_run_microtasks_entry_code(*stub.GetCode());
+}
void Heap::CreateFixedStubs() {
// Here we create roots for fixed stubs. They are needed at GC
@@ -2639,6 +2642,7 @@ void Heap::CreateFixedStubs() {
// To workaround the problem, make separate functions without inlining.
Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub();
+ Heap::CreateJSRunMicrotasksEntryStub();
}
bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
@@ -2656,7 +2660,7 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kFeedbackVectorsForProfilingToolsRootIndex:
case kNoScriptSharedFunctionInfosRootIndex:
case kWeakStackTraceListRootIndex:
- case kSerializedTemplatesRootIndex:
+ case kSerializedObjectsRootIndex:
case kSerializedGlobalProxySizesRootIndex:
case kPublicSymbolTableRootIndex:
case kApiSymbolTableRootIndex:
@@ -3984,7 +3988,9 @@ AllocationResult Heap::AllocateFeedbackVector(SharedFunctionInfo* shared,
result->set_map_after_allocation(feedback_vector_map(), SKIP_WRITE_BARRIER);
FeedbackVector* vector = FeedbackVector::cast(result);
vector->set_shared_function_info(shared);
- vector->set_optimized_code_cell(Smi::FromEnum(OptimizationMarker::kNone));
+ vector->set_optimized_code_cell(Smi::FromEnum(
+ FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
+ : OptimizationMarker::kNone));
vector->set_length(length);
vector->set_invocation_count(0);
vector->set_profiler_ticks(0);
@@ -4546,9 +4552,9 @@ void Heap::CollectCodeStatistics() {
void Heap::Print() {
if (!HasBeenSetUp()) return;
isolate()->PrintStack(stdout);
- AllSpaces spaces(this);
- for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
- space->Print();
+
+ for (SpaceIterator it(this); it.has_next();) {
+ it.next()->Print();
}
}
@@ -4559,37 +4565,6 @@ void Heap::ReportCodeStatistics(const char* title) {
CodeStatistics::ReportCodeStatistics(isolate());
}
-
-// This function expects that NewSpace's allocated objects histogram is
-// populated (via a call to CollectStatistics or else as a side effect of a
-// just-completed scavenge collection).
-void Heap::ReportHeapStatistics(const char* title) {
- USE(title);
- PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
- gc_count_);
- PrintF("old_generation_allocation_limit_ %" PRIuS "\n",
- old_generation_allocation_limit_);
-
- PrintF("\n");
- PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
- isolate_->global_handles()->PrintStats();
- PrintF("\n");
-
- PrintF("Heap statistics : ");
- memory_allocator()->ReportStatistics();
- PrintF("To space : ");
- new_space_->ReportStatistics();
- PrintF("Old space : ");
- old_space_->ReportStatistics();
- PrintF("Code space : ");
- code_space_->ReportStatistics();
- PrintF("Map space : ");
- map_space_->ReportStatistics();
- PrintF("Large object space : ");
- lo_space_->ReportStatistics();
- PrintF(">>>>>> ========================================= >>>>>>\n");
-}
-
#endif // DEBUG
const char* Heap::GarbageCollectionReasonToString(
@@ -4887,7 +4862,8 @@ void Heap::ZapFromSpace() {
PageRange(new_space_->FromSpaceStart(), new_space_->FromSpaceEnd())) {
for (Address cursor = page->area_start(), limit = page->area_end();
cursor < limit; cursor += kPointerSize) {
- Memory::Address_at(cursor) = kFromSpaceZapValue;
+ Memory::Address_at(cursor) =
+ reinterpret_cast<Address>(kFromSpaceZapValue);
}
}
}
@@ -4904,8 +4880,11 @@ void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
v->VisitRootPointer(Root::kStringTable, reinterpret_cast<Object**>(
&roots_[kStringTableRootIndex]));
v->Synchronize(VisitorSynchronization::kStringTable);
- if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
+ if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE &&
+ mode != VISIT_FOR_SERIALIZATION) {
// Scavenge collections have special processing for this.
+ // Do not visit for serialization, since the external string table will
+ // be populated from scratch upon deserialization.
external_string_table_.IterateAll(v);
}
v->Synchronize(VisitorSynchronization::kExternalStringsTable);
@@ -5007,7 +4986,10 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
// Iterate over global handles.
switch (mode) {
- case VISIT_ONLY_STRONG_FOR_SERIALIZATION:
+ case VISIT_FOR_SERIALIZATION:
+ // Global handles are not iterated by the serializer. Values referenced by
+ // global handles need to be added manually.
+ break;
case VISIT_ONLY_STRONG:
isolate_->global_handles()->IterateStrongRoots(v);
break;
@@ -5027,11 +5009,14 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
}
v->Synchronize(VisitorSynchronization::kGlobalHandles);
- // Iterate over eternal handles.
- if (isMinorGC) {
- isolate_->eternal_handles()->IterateNewSpaceRoots(v);
- } else {
- isolate_->eternal_handles()->IterateAllRoots(v);
+ // Iterate over eternal handles. Eternal handles are not iterated by the
+ // serializer. Values referenced by eternal handles need to be added manually.
+ if (mode != VISIT_FOR_SERIALIZATION) {
+ if (isMinorGC) {
+ isolate_->eternal_handles()->IterateNewSpaceRoots(v);
+ } else {
+ isolate_->eternal_handles()->IterateAllRoots(v);
+ }
}
v->Synchronize(VisitorSynchronization::kEternalHandles);
@@ -5046,13 +5031,11 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
v->Synchronize(VisitorSynchronization::kStrongRoots);
// Iterate over the partial snapshot cache unless serializing.
- if (mode != VISIT_ONLY_STRONG_FOR_SERIALIZATION) {
+ if (mode != VISIT_FOR_SERIALIZATION) {
SerializerDeserializer::Iterate(isolate_, v);
+ // We don't do a v->Synchronize call here because the serializer and the
+ // deserializer are deliberately out of sync at this point.
}
- // We don't do a v->Synchronize call here, because in debug mode that will
- // output a flag to the snapshot. However at this point the serializer and
- // deserializer are deliberately a little unsynchronized (see above) so the
- // checking of the sync flag in the snapshot would fail.
}
@@ -5095,8 +5078,8 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
// The new space size must be a power of two to support single-bit testing
// for containment.
- max_semi_space_size_ = static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
- static_cast<uint64_t>(max_semi_space_size_)));
+ max_semi_space_size_ = base::bits::RoundUpToPowerOfTwo32(
+ static_cast<uint32_t>(max_semi_space_size_));
if (max_semi_space_size_ == kMaxSemiSpaceSizeInKB * KB) {
// Start with at least 1*MB semi-space on machines with a lot of memory.
@@ -5470,13 +5453,20 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (bytes_to_limit > 0) {
double current_percent = (gained_since_last_gc / bytes_to_limit) * 100.0;
- if (FLAG_trace_incremental_marking) {
+ if (FLAG_trace_stress_marking) {
isolate()->PrintWithTimestamp(
"[IncrementalMarking] %.2lf%% of the memory limit reached\n",
current_percent);
}
- if (static_cast<int>(current_percent) >= stress_marking_percentage_) {
+ if (FLAG_fuzzer_gc_analysis) {
+ // Skips values >=100% since they already trigger marking.
+ if (current_percent < 100.0) {
+ max_marking_limit_reached_ =
+ std::max(max_marking_limit_reached_, current_percent);
+ }
+ } else if (static_cast<int>(current_percent) >=
+ stress_marking_percentage_) {
stress_marking_percentage_ = NextStressMarkingLimit();
return IncrementalMarkingLimit::kHardLimit;
}
@@ -5521,13 +5511,13 @@ void Heap::DisableInlineAllocation() {
CodeSpaceMemoryModificationScope modification_scope(this);
for (PagedSpace* space = spaces.next(); space != nullptr;
space = spaces.next()) {
- space->EmptyAllocationInfo();
+ space->FreeLinearAllocationArea();
}
}
bool Heap::SetUp() {
-#ifdef DEBUG
- allocation_timeout_ = FLAG_gc_interval;
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ allocation_timeout_ = NextAllocationTimeout();
#endif
// Initialize heap spaces and initial maps and objects. Whenever something
@@ -5543,7 +5533,7 @@ bool Heap::SetUp() {
}
mmap_region_base_ =
- reinterpret_cast<uintptr_t>(base::OS::GetRandomMmapAddr()) &
+ reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
~kMmapRegionMask;
// Set up memory allocator.
@@ -5632,6 +5622,13 @@ bool Heap::SetUp() {
if (FLAG_stress_marking > 0) {
stress_marking_percentage_ = NextStressMarkingLimit();
+ stress_marking_observer_ = new StressMarkingObserver(*this);
+ AddAllocationObserversToAllSpaces(stress_marking_observer_,
+ stress_marking_observer_);
+ }
+ if (FLAG_stress_scavenge > 0) {
+ stress_scavenge_observer_ = new StressScavengeObserver(*this);
+ new_space()->AddAllocationObserver(stress_scavenge_observer_);
}
write_protect_code_memory_ = FLAG_write_protect_code_memory;
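Note: the stress observers attached above are AllocationObservers; their step bookkeeping appears further down in this patch. A minimal sketch of such an observer, assuming the usual AllocationObserver(step_size) constructor and the Step() signature used by AllocationStep() below; the real StressMarkingObserver/StressScavengeObserver live in separate files.

class CountingObserver : public AllocationObserver {
 public:
  explicit CountingObserver(intptr_t step_size)
      : AllocationObserver(step_size) {}
  void Step(int bytes_allocated, Address soon_object, size_t size) override {
    // Invoked roughly every |step_size| allocated bytes in the observed space.
    ++steps_;
  }
 private:
  int steps_ = 0;
};
// As done above for stress marking, the same observer may be passed twice:
// heap->AddAllocationObserversToAllSpaces(&observer, &observer);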
@@ -5667,11 +5664,34 @@ void Heap::ClearStackLimits() {
roots_[kRealStackLimitRootIndex] = Smi::kZero;
}
+int Heap::NextAllocationTimeout(int current_timeout) {
+ if (FLAG_random_gc_interval > 0) {
+ // If the current timeout hasn't reached 0, the GC was caused by something
+ // other than the --stress-atomic-gc flag, so don't update the timeout.
+ if (current_timeout <= 0) {
+ return isolate()->fuzzer_rng()->NextInt(FLAG_random_gc_interval + 1);
+ } else {
+ return current_timeout;
+ }
+ }
+ return FLAG_gc_interval;
+}
+
void Heap::PrintAllocationsHash() {
uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
}
+void Heap::PrintMaxMarkingLimitReached() {
+ PrintF("\n### Maximum marking limit reached = %.02lf\n",
+ max_marking_limit_reached_);
+}
+
+void Heap::PrintMaxNewSpaceSizeReached() {
+ PrintF("\n### Maximum new space size reached = %.02lf\n",
+ stress_scavenge_observer_->MaxNewSpaceSizeReached());
+}
+
int Heap::NextStressMarkingLimit() {
return isolate()->fuzzer_rng()->NextInt(FLAG_stress_marking + 1);
}
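Note: both fuzzing knobs feed allocation_timeout_, which is only compiled in when V8_ENABLE_ALLOCATION_TIMEOUT is defined. Hypothetical invocations, for illustration only:

// d8 --gc-interval=1000        -> force a GC every 1000 allocations (fixed).
// d8 --random-gc-interval=500  -> after each forced GC, draw a fresh
//                                 countdown uniformly from [0, 500].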
@@ -5734,14 +5754,35 @@ void Heap::TearDown() {
UpdateMaximumCommitted();
- if (FLAG_verify_predictable) {
+ if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
PrintAllocationsHash();
}
+ if (FLAG_fuzzer_gc_analysis) {
+ if (FLAG_stress_marking > 0) {
+ PrintMaxMarkingLimitReached();
+ }
+ if (FLAG_stress_scavenge > 0) {
+ PrintMaxNewSpaceSizeReached();
+ }
+ }
+
new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
delete idle_scavenge_observer_;
idle_scavenge_observer_ = nullptr;
+ if (FLAG_stress_marking > 0) {
+ RemoveAllocationObserversFromAllSpaces(stress_marking_observer_,
+ stress_marking_observer_);
+ delete stress_marking_observer_;
+ stress_marking_observer_ = nullptr;
+ }
+ if (FLAG_stress_scavenge > 0) {
+ new_space()->RemoveAllocationObserver(stress_scavenge_observer_);
+ delete stress_scavenge_observer_;
+ stress_scavenge_observer_ = nullptr;
+ }
+
if (mark_compact_collector_ != nullptr) {
mark_compact_collector_->TearDown();
delete mark_compact_collector_;
@@ -6086,22 +6127,6 @@ void Heap::RecordWritesIntoCode(Code* code) {
}
}
-Space* AllSpaces::next() {
- switch (counter_++) {
- case NEW_SPACE:
- return heap_->new_space();
- case OLD_SPACE:
- return heap_->old_space();
- case CODE_SPACE:
- return heap_->code_space();
- case MAP_SPACE:
- return heap_->map_space();
- case LO_SPACE:
- return heap_->lo_space();
- default:
- return nullptr;
- }
-}
PagedSpace* PagedSpaces::next() {
switch (counter_++) {
@@ -6116,18 +6141,6 @@ PagedSpace* PagedSpaces::next() {
}
}
-
-OldSpace* OldSpaces::next() {
- switch (counter_++) {
- case OLD_SPACE:
- return heap_->old_space();
- case CODE_SPACE:
- return heap_->code_space();
- default:
- return nullptr;
- }
-}
-
SpaceIterator::SpaceIterator(Heap* heap)
: heap_(heap), current_space_(FIRST_SPACE - 1) {}
@@ -6380,9 +6393,9 @@ void Heap::RememberUnmappedPage(Address page, bool compacted) {
uintptr_t p = reinterpret_cast<uintptr_t>(page);
// Tag the page pointer to make it findable in the dump file.
if (compacted) {
- p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
+ p ^= 0xC1EAD & (Page::kPageSize - 1); // Cleared.
} else {
- p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
+ p ^= 0x1D1ED & (Page::kPageSize - 1); // I died.
}
remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
reinterpret_cast<Address>(p);
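Note (illustration only, invented address): the tag only touches the low page-offset bits and XOR is its own inverse, so the original page address can be recovered from a crash dump by applying the same mask again.

uintptr_t page_addr = 0x7f3a5c400000;
uintptr_t tag = 0xC1EAD & (Page::kPageSize - 1);
uintptr_t tagged = page_addr ^ tag;  // stored in remembered_unmapped_pages_
uintptr_t original = tagged ^ tag;   // == page_addr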
@@ -6485,6 +6498,22 @@ bool Heap::GetObjectTypeName(size_t index, const char** object_type,
return false;
}
+size_t Heap::NumberOfNativeContexts() {
+ int result = 0;
+ Object* context = native_contexts_list();
+ while (!context->IsUndefined(isolate())) {
+ ++result;
+ Context* native_context = Context::cast(context);
+ context = native_context->next_context_link();
+ }
+ return result;
+}
+
+size_t Heap::NumberOfDetachedContexts() {
+ // The detached_contexts() array has two entries per detached context.
+ return detached_contexts()->length() / 2;
+}
+
const char* AllocationSpaceName(AllocationSpace space) {
switch (space) {
case NEW_SPACE:
@@ -6574,6 +6603,17 @@ void Heap::CreateObjectStats() {
}
}
+void AllocationObserver::AllocationStep(int bytes_allocated,
+ Address soon_object, size_t size) {
+ DCHECK_GE(bytes_allocated, 0);
+ bytes_to_next_step_ -= bytes_allocated;
+ if (bytes_to_next_step_ <= 0) {
+ Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object, size);
+ step_size_ = GetNextStepSize();
+ bytes_to_next_step_ = step_size_;
+ }
+}
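Note: this bookkeeping means Step() reports the bytes actually allocated since the previous step, which can overshoot the nominal step size. A worked example with invented numbers:

// step_size_ == 1024, bytes_to_next_step_ == 1024
// AllocationStep(600, ...) -> bytes_to_next_step_ == 424, no Step().
// AllocationStep(600, ...) -> bytes_to_next_step_ == -176,
//                             Step(1024 - (-176) == 1200, ...) fires,
//                             then both counters reset to GetNextStepSize().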
+
namespace {
Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
@@ -6610,15 +6650,13 @@ Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
return GcSafeCastToCode(this, large_page->GetObject(), inner_pointer);
}
- if (!code_space()->Contains(inner_pointer)) {
- return nullptr;
- }
+ DCHECK(code_space()->Contains(inner_pointer));
// Iterate through the page until we reach the end or find an object starting
// after the inner pointer.
Page* page = Page::FromAddress(inner_pointer);
DCHECK_EQ(page->owner(), code_space());
- mark_compact_collector()->sweeper()->SweepOrWaitUntilSweepingCompleted(page);
+ mark_compact_collector()->sweeper()->EnsurePageIsIterable(page);
Address addr = page->skip_list()->StartFor(inner_pointer);
Address top = code_space()->top();
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 72d74c9715..3d8234f392 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -42,6 +42,7 @@ class BytecodeArray;
class CodeDataContainer;
class DeoptimizationData;
class HandlerTable;
+class IncrementalMarking;
class JSArrayBuffer;
using v8::MemoryPressureLevel;
@@ -105,6 +106,7 @@ using v8::MemoryPressureLevel;
V(Map, script_context_table_map, ScriptContextTableMap) \
/* Maps */ \
V(Map, descriptor_array_map, DescriptorArrayMap) \
+ V(Map, array_list_map, ArrayListMap) \
V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
V(Map, ordered_hash_map_map, OrderedHashMapMap) \
@@ -245,7 +247,7 @@ using v8::MemoryPressureLevel;
FeedbackVectorsForProfilingTools) \
V(Object, weak_stack_trace_list, WeakStackTraceList) \
V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \
- V(FixedArray, serialized_templates, SerializedTemplates) \
+ V(FixedArray, serialized_objects, SerializedObjects) \
V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
V(TemplateList, message_listeners, MessageListeners) \
/* DeserializeLazy handlers for lazy bytecode deserialization */ \
@@ -255,7 +257,8 @@ using v8::MemoryPressureLevel;
DeserializeLazyHandlerExtraWide) \
/* JS Entries */ \
V(Code, js_entry_code, JsEntryCode) \
- V(Code, js_construct_entry_code, JsConstructEntryCode)
+ V(Code, js_construct_entry_code, JsConstructEntryCode) \
+ V(Code, js_run_microtasks_entry_code, JsRunMicrotasksEntryCode)
// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
@@ -411,10 +414,11 @@ class ObjectStats;
class Page;
class PagedSpace;
class RootVisitor;
-class Scavenger;
class ScavengeJob;
+class Scavenger;
class Space;
class StoreBuffer;
+class StressScavengeObserver;
class TracePossibleWrapperReporter;
class WeakObjectRetainer;
@@ -523,78 +527,51 @@ struct CommentStatistic {
};
#endif
-class NumberAndSizeInfo BASE_EMBEDDED {
+class Heap {
public:
- NumberAndSizeInfo() : number_(0), bytes_(0) {}
+ // Declare all the root indices. This defines the root list order.
+ // clang-format off
+ enum RootListIndex {
+#define DECL(type, name, camel_name) k##camel_name##RootIndex,
+ STRONG_ROOT_LIST(DECL)
+#undef DECL
- int number() const { return number_; }
- void increment_number(int num) { number_ += num; }
+#define DECL(name, str) k##name##RootIndex,
+ INTERNALIZED_STRING_LIST(DECL)
+#undef DECL
- int bytes() const { return bytes_; }
- void increment_bytes(int size) { bytes_ += size; }
+#define DECL(name) k##name##RootIndex,
+ PRIVATE_SYMBOL_LIST(DECL)
+#undef DECL
- void clear() {
- number_ = 0;
- bytes_ = 0;
- }
+#define DECL(name, description) k##name##RootIndex,
+ PUBLIC_SYMBOL_LIST(DECL)
+ WELL_KNOWN_SYMBOL_LIST(DECL)
+#undef DECL
- private:
- int number_;
- int bytes_;
-};
+#define DECL(accessor_name, AccessorName) k##AccessorName##AccessorRootIndex,
+ ACCESSOR_INFO_LIST(DECL)
+#undef DECL
-// HistogramInfo class for recording a single "bar" of a histogram. This
-// class is used for collecting statistics to print to the log file.
-class HistogramInfo : public NumberAndSizeInfo {
- public:
- HistogramInfo() : NumberAndSizeInfo(), name_(nullptr) {}
+#define DECL(NAME, Name, name) k##Name##MapRootIndex,
+ STRUCT_LIST(DECL)
+#undef DECL
- const char* name() { return name_; }
- void set_name(const char* name) { name_ = name; }
+#define DECL(NAME, Name, Size, name) k##Name##Size##MapRootIndex,
+ DATA_HANDLER_LIST(DECL)
+#undef DECL
- private:
- const char* name_;
-};
+ kStringTableRootIndex,
-class Heap {
- public:
- // Declare all the root indices. This defines the root list order.
- enum RootListIndex {
-#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
- STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
-#undef ROOT_INDEX_DECLARATION
-
-#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
- INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
-#undef STRING_DECLARATION
-
-#define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex,
- PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
-#undef SYMBOL_INDEX_DECLARATION
-
-#define SYMBOL_INDEX_DECLARATION(name, description) k##name##RootIndex,
- PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
- WELL_KNOWN_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
-#undef SYMBOL_INDEX_DECLARATION
-
-#define ACCESSOR_INDEX_DECLARATION(accessor_name, AccessorName) \
- k##AccessorName##AccessorRootIndex,
- ACCESSOR_INFO_LIST(ACCESSOR_INDEX_DECLARATION)
-#undef ACCESSOR_INDEX_DECLARATION
-
-// Utility type maps
-#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
- STRUCT_LIST(DECLARE_STRUCT_MAP)
-#undef DECLARE_STRUCT_MAP
- kStringTableRootIndex,
-
-#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
- SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
-#undef ROOT_INDEX_DECLARATION
- kRootListLength,
+#define DECL(type, name, camel_name) k##camel_name##RootIndex,
+ SMI_ROOT_LIST(DECL)
+#undef DECL
+
+ kRootListLength,
kStrongRootListLength = kStringTableRootIndex,
kSmiRootsStart = kStringTableRootIndex + 1
};
+ // clang-format on
enum FindMementoMode { kForRuntime, kForGC };
@@ -626,15 +603,15 @@ class Heap {
#endif
// Semi-space size needs to be a multiple of page size.
- static const size_t kMinSemiSpaceSizeInKB =
+ static const int kMinSemiSpaceSizeInKB =
1 * kPointerMultiplier * ((1 << kPageSizeBits) / KB);
- static const size_t kMaxSemiSpaceSizeInKB =
+ static const int kMaxSemiSpaceSizeInKB =
16 * kPointerMultiplier * ((1 << kPageSizeBits) / KB);
// The old space size has to be a multiple of Page::kPageSize.
// Sizes are in MB.
- static const size_t kMinOldGenerationSize = 128 * kPointerMultiplier;
- static const size_t kMaxOldGenerationSize = 1024 * kPointerMultiplier;
+ static const int kMinOldGenerationSize = 128 * kPointerMultiplier;
+ static const int kMaxOldGenerationSize = 1024 * kPointerMultiplier;
static const int kTraceRingBufferSize = 512;
static const int kStacktraceBufferSize = 512;
@@ -875,7 +852,7 @@ class Heap {
inline int NextScriptId();
inline int GetNextTemplateSerialNumber();
- void SetSerializedTemplates(FixedArray* templates);
+ void SetSerializedObjects(FixedArray* objects);
void SetSerializedGlobalProxySizes(FixedArray* sizes);
// For post mortem debugging.
@@ -1045,6 +1022,11 @@ class Heap {
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR
+#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
+ inline Map* name##_map();
+ DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
+#undef DATA_HANDLER_MAP_ACCESSOR
+
#define STRING_ACCESSOR(name, str) inline String* name();
INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR
@@ -1352,6 +1334,12 @@ class Heap {
bool GetObjectTypeName(size_t index, const char** object_type,
const char** object_sub_type);
+ // The total number of native contexts object on the heap.
+ size_t NumberOfNativeContexts();
+ // The total number of native contexts that were detached but were not
+ // garbage collected yet.
+ size_t NumberOfDetachedContexts();
+
// ===========================================================================
// Code statistics. ==========================================================
// ===========================================================================
@@ -1372,10 +1360,10 @@ class Heap {
size_t MaxOldGenerationSize() { return max_old_generation_size_; }
static size_t ComputeMaxOldGenerationSize(uint64_t physical_memory) {
- const size_t old_space_physical_memory_factor = 4;
- size_t computed_size = static_cast<size_t>(
- physical_memory / i::MB / old_space_physical_memory_factor *
- kPointerMultiplier);
+ const int old_space_physical_memory_factor = 4;
+ int computed_size =
+ static_cast<int>(physical_memory / i::MB /
+ old_space_physical_memory_factor * kPointerMultiplier);
return Max(Min(computed_size, kMaxOldGenerationSize),
kMinOldGenerationSize);
}
@@ -1387,11 +1375,11 @@ class Heap {
uint64_t capped_physical_memory =
Max(Min(physical_memory, max_physical_memory), min_physical_memory);
// linearly scale max semi-space size: (X-A)/(B-A)*(D-C)+C
- size_t semi_space_size_in_kb =
- static_cast<size_t>(((capped_physical_memory - min_physical_memory) *
- (kMaxSemiSpaceSizeInKB - kMinSemiSpaceSizeInKB)) /
- (max_physical_memory - min_physical_memory) +
- kMinSemiSpaceSizeInKB);
+ int semi_space_size_in_kb =
+ static_cast<int>(((capped_physical_memory - min_physical_memory) *
+ (kMaxSemiSpaceSizeInKB - kMinSemiSpaceSizeInKB)) /
+ (max_physical_memory - min_physical_memory) +
+ kMinSemiSpaceSizeInKB);
return RoundUp(semi_space_size_in_kb, (1 << kPageSizeBits) / KB);
}
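Note (worked example): the interpolation (X-A)/(B-A)*(D-C)+C maps physical memory linearly onto the semi-space range. Assuming the 64-bit values kMinSemiSpaceSizeInKB == 1024 and kMaxSemiSpaceSizeInKB == 16384, invented physical-memory bounds of 512 MB and 8 GB, and everything measured in MB/KB for readability:

// For 4 GB of RAM:
//   (4096 - 512) / (8192 - 512) * (16384 - 1024) + 1024 == 8192 KB,
// already a multiple of the 512 KB page size, so RoundUp() leaves it alone.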
@@ -1571,6 +1559,19 @@ class Heap {
const PretenuringFeedbackMap& local_pretenuring_feedback);
// ===========================================================================
+ // Allocation tracking. ======================================================
+ // ===========================================================================
+
+ // Adds {new_space_observer} to new space and {observer} to any other space.
+ void AddAllocationObserversToAllSpaces(
+ AllocationObserver* observer, AllocationObserver* new_space_observer);
+
+ // Removes {new_space_observer} from new space and {observer} from any other
+ // space.
+ void RemoveAllocationObserversFromAllSpaces(
+ AllocationObserver* observer, AllocationObserver* new_space_observer);
+
+ // ===========================================================================
// Retaining path tracking. ==================================================
// ===========================================================================
@@ -1599,21 +1600,22 @@ class Heap {
void VerifyRememberedSetFor(HeapObject* object);
#endif
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
+#endif
+
#ifdef DEBUG
void VerifyCountersAfterSweeping();
void VerifyCountersBeforeConcurrentSweeping();
- void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
-
void Print();
void PrintHandles();
- // Report heap statistics.
- void ReportHeapStatistics(const char* title);
+ // Report code statistics.
void ReportCodeStatistics(const char* title);
#endif
void* GetRandomMmapAddr() {
- void* result = base::OS::GetRandomMmapAddr();
+ void* result = v8::internal::GetRandomMmapAddr();
#if V8_TARGET_ARCH_X64
#if V8_OS_MACOSX
// The Darwin kernel [as of macOS 10.12.5] does not clean up page
@@ -1624,7 +1626,7 @@ class Heap {
// killed. Confine the hint to a 32-bit section of the virtual address
// space. See crbug.com/700928.
uintptr_t offset =
- reinterpret_cast<uintptr_t>(base::OS::GetRandomMmapAddr()) &
+ reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
kMmapRegionMask;
result = reinterpret_cast<void*>(mmap_region_base_ + offset);
#endif // V8_OS_MACOSX
@@ -1816,6 +1818,7 @@ class Heap {
// because of a gcc-4.4 bug that assigns wrong vtable entries.
NO_INLINE(void CreateJSEntryStub());
NO_INLINE(void CreateJSConstructEntryStub());
+ NO_INLINE(void CreateJSRunMicrotasksEntryStub());
void CreateFixedStubs();
@@ -1837,8 +1840,7 @@ class Heap {
// the old space.
void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);
- // Record statistics before and after garbage collection.
- void ReportStatisticsBeforeGC();
+ // Record statistics after garbage collection.
void ReportStatisticsAfterGC();
// Creates and installs the full-sized number string cache.
@@ -1865,10 +1867,14 @@ class Heap {
GCIdleTimeHeapState heap_state, double start_ms,
double deadline_in_ms);
+ int NextAllocationTimeout(int current_timeout = 0);
inline void UpdateAllocationsHash(HeapObject* object);
inline void UpdateAllocationsHash(uint32_t value);
void PrintAllocationsHash();
+ void PrintMaxMarkingLimitReached();
+ void PrintMaxNewSpaceSizeReached();
+
int NextStressMarkingLimit();
void AddToRingBuffer(const char* string);
@@ -2387,6 +2393,17 @@ class Heap {
// is reached.
int stress_marking_percentage_;
+ // Observer that causes more frequent checks of the incremental marking
+ // limit.
+ AllocationObserver* stress_marking_observer_;
+
+ // Observer that can cause early scavenge start.
+ StressScavengeObserver* stress_scavenge_observer_;
+
+ // The maximum percent of the marking limit reached without causing marking.
+ // This is tracked when specifying --fuzzer-gc-analysis.
+ double max_marking_limit_reached_;
+
// How many mark-sweep collections happened.
unsigned int ms_count_;
@@ -2400,13 +2417,6 @@ class Heap {
int remembered_unmapped_pages_index_;
Address remembered_unmapped_pages_[kRememberedUnmappedPages];
-#ifdef DEBUG
- // If the --gc-interval flag is set to a positive value, this
- // variable holds the value indicating the number of allocations
- // remain until the next failure and garbage collection.
- int allocation_timeout_;
-#endif // DEBUG
-
// Limit that triggers a global GC on the next (normally caused) GC. This
// is checked when we have already decided to do a GC to help determine
// which collector to invoke, before expanding a paged space in the old
@@ -2552,6 +2562,13 @@ class Heap {
HeapObject* pending_layout_change_object_;
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ // If the --gc-interval flag is set to a positive value, this
+ // variable holds the value indicating the number of allocations
+ // remain until the next failure and garbage collection.
+ int allocation_timeout_;
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
+
std::map<HeapObject*, HeapObject*> retainer_;
std::map<HeapObject*, Root> retaining_root_;
// If an object is retained by an ephemeron, then the retaining key of the
@@ -2685,37 +2702,10 @@ class VerifySmisVisitor : public RootVisitor {
void VisitRootPointers(Root root, Object** start, Object** end) override;
};
-
-// Space iterator for iterating over all spaces of the heap. Returns each space
-// in turn, and null when it is done.
-class AllSpaces BASE_EMBEDDED {
- public:
- explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
- Space* next();
-
- private:
- Heap* heap_;
- int counter_;
-};
-
-
-// Space iterator for iterating over all old spaces of the heap: Old space
-// and code space. Returns each space in turn, and null when it is done.
-class V8_EXPORT_PRIVATE OldSpaces BASE_EMBEDDED {
- public:
- explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
- OldSpace* next();
-
- private:
- Heap* heap_;
- int counter_;
-};
-
-
// Space iterator for iterating over all the paged spaces of the heap: Map
// space, old space, code space and cell space. Returns
// each space in turn, and null when it is done.
-class PagedSpaces BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE PagedSpaces BASE_EMBEDDED {
public:
explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
PagedSpace* next();
@@ -2800,15 +2790,7 @@ class AllocationObserver {
// Called each time the observed space does an allocation step. This may be
// more frequently than the step_size we are monitoring (e.g. when there are
// multiple observers, or when page or space boundary is encountered.)
- void AllocationStep(int bytes_allocated, Address soon_object, size_t size) {
- bytes_to_next_step_ -= bytes_allocated;
- if (bytes_to_next_step_ <= 0) {
- Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
- size);
- step_size_ = GetNextStepSize();
- bytes_to_next_step_ = step_size_;
- }
- }
+ void AllocationStep(int bytes_allocated, Address soon_object, size_t size);
protected:
intptr_t step_size() const { return step_size_; }
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index 8acbd31ec7..fa6082ae7c 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -21,7 +21,7 @@ void IncrementalMarkingJob::Start(Heap* heap) {
}
void IncrementalMarkingJob::ScheduleTask(Heap* heap) {
- if (!task_pending_) {
+ if (!task_pending_ && heap->use_tasks()) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
task_pending_ = true;
auto task = new Task(heap->isolate(), this);
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index a046dff4b0..4868adc26e 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -34,7 +34,8 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
Heap* heap = incremental_marking_.heap();
VMState<GC> state(heap->isolate());
RuntimeCallTimerScope runtime_timer(
- heap->isolate(), &RuntimeCallStats::GC_Custom_IncrementalMarkingObserver);
+ heap->isolate(),
+ RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
if (incremental_marking_.black_allocation() && addr != nullptr) {
// AdvanceIncrementalMarkingOnAllocation can start black allocation.
@@ -363,16 +364,8 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
SetState(SWEEPING);
}
- SpaceIterator it(heap_);
- while (it.has_next()) {
- Space* space = it.next();
- if (space == heap_->new_space()) {
- space->AddAllocationObserver(&new_generation_observer_);
- } else {
- space->AddAllocationObserver(&old_generation_observer_);
- }
- }
-
+ heap_->AddAllocationObserversToAllSpaces(&old_generation_observer_,
+ &new_generation_observer_);
incremental_marking_job()->Start(heap_);
}
@@ -427,7 +420,7 @@ void IncrementalMarking::StartMarking() {
IncrementalMarkingRootMarkingVisitor visitor(this);
heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
- if (FLAG_concurrent_marking) {
+ if (FLAG_concurrent_marking && heap_->use_tasks()) {
heap_->concurrent_marking()->ScheduleTasks();
}
@@ -442,9 +435,9 @@ void IncrementalMarking::StartBlackAllocation() {
DCHECK(!black_allocation_);
DCHECK(IsMarking());
black_allocation_ = true;
- heap()->old_space()->MarkAllocationInfoBlack();
- heap()->map_space()->MarkAllocationInfoBlack();
- heap()->code_space()->MarkAllocationInfoBlack();
+ heap()->old_space()->MarkLinearAllocationAreaBlack();
+ heap()->map_space()->MarkLinearAllocationAreaBlack();
+ heap()->code_space()->MarkLinearAllocationAreaBlack();
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Black allocation started\n");
@@ -454,9 +447,9 @@ void IncrementalMarking::StartBlackAllocation() {
void IncrementalMarking::PauseBlackAllocation() {
DCHECK(FLAG_black_allocation);
DCHECK(IsMarking());
- heap()->old_space()->UnmarkAllocationInfo();
- heap()->map_space()->UnmarkAllocationInfo();
- heap()->code_space()->UnmarkAllocationInfo();
+ heap()->old_space()->UnmarkLinearAllocationArea();
+ heap()->map_space()->UnmarkLinearAllocationArea();
+ heap()->code_space()->UnmarkLinearAllocationArea();
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Black allocation paused\n");
@@ -687,7 +680,7 @@ void IncrementalMarking::RevisitObject(HeapObject* obj) {
DCHECK(IsMarking());
DCHECK(FLAG_concurrent_marking || marking_state()->IsBlack(obj));
Page* page = Page::FromAddress(obj->address());
- if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
+ if (page->owner()->identity() == LO_SPACE) {
page->ResetProgressBar();
}
Map* map = obj->map();
@@ -996,10 +989,14 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
marking_worklist()->shared()->MergeGlobalPool(
marking_worklist()->on_hold());
}
+
+// Only print the marking worklist in debug mode to save ~40KB of code size.
+#ifdef DEBUG
if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
FLAG_trace_gc_verbose) {
marking_worklist()->Print();
}
+#endif
if (worklist_to_process == WorklistToProcess::kBailout) {
bytes_processed =
diff --git a/deps/v8/src/heap/local-allocator.h b/deps/v8/src/heap/local-allocator.h
index 2f21b382b6..0a23e774b3 100644
--- a/deps/v8/src/heap/local-allocator.h
+++ b/deps/v8/src/heap/local-allocator.h
@@ -33,7 +33,7 @@ class LocalAllocator {
compaction_spaces_.Get(CODE_SPACE));
// Give back remaining LAB space if this LocalAllocator's new space LAB
// sits right next to new space allocation top.
- const AllocationInfo info = new_space_lab_.Close();
+ const LinearAllocationArea info = new_space_lab_.Close();
const Address top = new_space_->top();
if (info.limit() != nullptr && info.limit() == top) {
DCHECK_NOT_NULL(info.top());
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 4ae9dce439..30a7e55d6b 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -625,9 +625,6 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
verifier.Run();
}
#endif
-
- if (heap()->memory_allocator()->unmapper()->has_delayed_chunks())
- heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
void MarkCompactCollector::ComputeEvacuationHeuristics(
@@ -925,6 +922,7 @@ void MarkCompactCollector::Finish() {
#endif
sweeper()->StartSweeperTasks();
+ sweeper()->StartIterabilityTasks();
// The hashing of weak_object_to_code_table is no longer valid.
heap()->weak_object_to_code_table()->Rehash();
@@ -1452,9 +1450,11 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
buffer_(LocalAllocationBuffer::InvalidBuffer()),
promoted_size_(0),
semispace_copied_size_(0),
- local_pretenuring_feedback_(local_pretenuring_feedback) {}
+ local_pretenuring_feedback_(local_pretenuring_feedback),
+ is_incremental_marking_(heap->incremental_marking()->IsMarking()) {}
inline bool Visit(HeapObject* object, int size) override {
+ if (TryEvacuateWithoutCopy(object)) return true;
HeapObject* target_object = nullptr;
if (heap_->ShouldBePromoted(object->address()) &&
TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
@@ -1474,6 +1474,26 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
intptr_t semispace_copied_size() { return semispace_copied_size_; }
private:
+ inline bool TryEvacuateWithoutCopy(HeapObject* object) {
+ if (is_incremental_marking_) return false;
+
+ Map* map = object->map();
+
+ // Some objects can be evacuated without creating a copy.
+ if (map->visitor_id() == kVisitThinString) {
+ HeapObject* actual = ThinString::cast(object)->unchecked_actual();
+ if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
+ base::Relaxed_Store(
+ reinterpret_cast<base::AtomicWord*>(object->address()),
+ reinterpret_cast<base::AtomicWord>(
+ MapWord::FromForwardingAddress(actual).ToMap()));
+ return true;
+ }
+ // TODO(mlippautz): Handle ConsString.
+
+ return false;
+ }
+
inline AllocationSpace AllocateTargetObject(HeapObject* old_object, int size,
HeapObject** target_object) {
AllocationAlignment alignment = old_object->RequiredAlignment();
@@ -1505,6 +1525,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
intptr_t promoted_size_;
intptr_t semispace_copied_size_;
Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
+ bool is_incremental_marking_;
};
template <PageEvacuationMode mode>
@@ -1633,28 +1654,16 @@ void MarkCompactCollector::ProcessMarkingWorklist() {
DCHECK(marking_worklist()->IsBailoutEmpty());
}
-// Mark all objects reachable (transitively) from objects on the marking
-// stack including references only considered in the atomic marking pause.
-void MarkCompactCollector::ProcessEphemeralMarking(
- bool only_process_harmony_weak_collections) {
+void MarkCompactCollector::ProcessEphemeralMarking() {
DCHECK(marking_worklist()->IsEmpty());
bool work_to_do = true;
while (work_to_do) {
- if (!only_process_harmony_weak_collections) {
- if (heap_->local_embedder_heap_tracer()->InUse()) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
- heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
- heap_->local_embedder_heap_tracer()->Trace(
- 0,
- EmbedderHeapTracer::AdvanceTracingActions(
- EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
- }
- } else {
- // TODO(mlippautz): We currently do not trace through blink when
- // discovering new objects reachable from weak roots (that have been made
- // strong). This is a limitation of not having a separate handle type
- // that doesn't require zapping before this phase. See crbug.com/668060.
- heap_->local_embedder_heap_tracer()->ClearCachedWrappersToTrace();
+ if (heap_->local_embedder_heap_tracer()->InUse()) {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
+ heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
+ heap_->local_embedder_heap_tracer()->Trace(
+ 0, EmbedderHeapTracer::AdvanceTracingActions(
+ EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
}
ProcessWeakCollections();
work_to_do = !marking_worklist()->IsEmpty();
@@ -1680,54 +1689,12 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
}
}
-class ObjectStatsVisitor : public HeapObjectVisitor {
- public:
- ObjectStatsVisitor(Heap* heap, ObjectStats* live_stats,
- ObjectStats* dead_stats)
- : live_collector_(heap, live_stats),
- dead_collector_(heap, dead_stats),
- marking_state_(
- heap->mark_compact_collector()->non_atomic_marking_state()) {
- DCHECK_NOT_NULL(live_stats);
- DCHECK_NOT_NULL(dead_stats);
- // Global objects are roots and thus recorded as live.
- live_collector_.CollectGlobalStatistics();
- }
-
- bool Visit(HeapObject* obj, int size) override {
- if (marking_state_->IsBlack(obj)) {
- live_collector_.CollectStatistics(obj);
- } else {
- DCHECK(!marking_state_->IsGrey(obj));
- dead_collector_.CollectStatistics(obj);
- }
- return true;
- }
-
- private:
- ObjectStatsCollector live_collector_;
- ObjectStatsCollector dead_collector_;
- MarkCompactCollector::NonAtomicMarkingState* marking_state_;
-};
-
-void MarkCompactCollector::VisitAllObjects(HeapObjectVisitor* visitor) {
- SpaceIterator space_it(heap());
- HeapObject* obj = nullptr;
- while (space_it.has_next()) {
- std::unique_ptr<ObjectIterator> it(space_it.next()->GetObjectIterator());
- ObjectIterator* obj_it = it.get();
- while ((obj = obj_it->Next()) != nullptr) {
- visitor->Visit(obj, obj->Size());
- }
- }
-}
-
void MarkCompactCollector::RecordObjectStats() {
if (V8_UNLIKELY(FLAG_gc_stats)) {
heap()->CreateObjectStats();
- ObjectStatsVisitor visitor(heap(), heap()->live_object_stats_,
- heap()->dead_object_stats_);
- VisitAllObjects(&visitor);
+ ObjectStatsCollector collector(heap(), heap()->live_object_stats_,
+ heap()->dead_object_stats_);
+ collector.Collect();
if (V8_UNLIKELY(FLAG_gc_stats &
v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
std::stringstream live, dead;
@@ -1844,6 +1811,8 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
}
void RunInParallel() override {
+ TRACE_BACKGROUND_GC(collector_->heap()->tracer(),
+ GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
double marking_time = 0.0;
{
TimedScope scope(&marking_time);
@@ -2146,7 +2115,7 @@ void MinorMarkCompactCollector::ProcessMarkingWorklist() {
void MinorMarkCompactCollector::CollectGarbage() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
- heap()->mark_compact_collector()->sweeper()->EnsureNewSpaceCompleted();
+ heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
CleanupSweepToIteratePages();
}
@@ -2186,6 +2155,15 @@ void MinorMarkCompactCollector::CollectGarbage() {
}
}
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+ heap(), [](MemoryChunk* chunk) {
+ if (chunk->SweepingDone()) {
+ RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
+ } else {
+ RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
+ }
+ });
+
heap()->account_external_memory_concurrently_freed();
}
@@ -2210,7 +2188,7 @@ void MinorMarkCompactCollector::MakeIterable(
p->AddressToMarkbitIndex(free_start),
p->AddressToMarkbitIndex(free_end));
if (free_space_mode == ZAP_FREE_SPACE) {
- memset(free_start, 0xcc, size);
+ memset(free_start, 0xCC, size);
}
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
@@ -2227,7 +2205,7 @@ void MinorMarkCompactCollector::MakeIterable(
p->AddressToMarkbitIndex(free_start),
p->AddressToMarkbitIndex(p->area_end()));
if (free_space_mode == ZAP_FREE_SPACE) {
- memset(free_start, 0xcc, size);
+ memset(free_start, 0xCC, size);
}
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
@@ -2266,7 +2244,7 @@ void MinorMarkCompactCollector::EvacuatePrologue() {
new_space_evacuation_pages_.push_back(p);
}
new_space->Flip();
- new_space->ResetAllocationInfo();
+ new_space->ResetLinearAllocationArea();
}
void MinorMarkCompactCollector::EvacuateEpilogue() {
@@ -2367,20 +2345,20 @@ void MarkCompactCollector::MarkLiveObjects() {
DCHECK(marking_worklist()->IsEmpty());
- // The objects reachable from the roots are marked, yet unreachable
- // objects are unmarked. Mark objects reachable due to host
- // application specific logic or through Harmony weak maps.
+ // The objects reachable from the roots are marked, yet unreachable objects
+ // are unmarked. Mark objects reachable due to embedder heap tracing or
+ // harmony weak maps.
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
- ProcessEphemeralMarking(false);
+ ProcessEphemeralMarking();
DCHECK(marking_worklist()->IsEmpty());
}
- // The objects reachable from the roots, weak maps or object groups
- // are marked. Objects pointed to only by weak global handles cannot be
- // immediately reclaimed. Instead, we have to mark them as pending and mark
- // objects reachable from them.
+ // The objects reachable from the roots, weak maps, and embedder heap
+ // tracing are marked. Objects pointed to only by weak global handles cannot
+ // be immediately reclaimed. Instead, we have to mark them as pending and
+ // mark objects reachable from them.
//
// First we identify nonlive weak handles and mark them as pending
// destruction.
@@ -2392,6 +2370,8 @@ void MarkCompactCollector::MarkLiveObjects() {
ProcessMarkingWorklist();
}
+ // Process finalizers, effectively keeping them alive until the next
+ // garbage collection.
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
@@ -2400,14 +2380,10 @@ void MarkCompactCollector::MarkLiveObjects() {
ProcessMarkingWorklist();
}
- // Repeat Harmony weak maps marking to mark unmarked objects reachable from
- // the weak roots we just marked as pending destruction.
- //
- // We only process harmony collections, as all object groups have been fully
- // processed and no weakly reachable node can discover new objects groups.
+ // Repeat ephemeral processing from the newly marked objects.
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
- ProcessEphemeralMarking(true);
+ ProcessEphemeralMarking();
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
heap()->local_embedder_heap_tracer()->TraceEpilogue();
@@ -2914,7 +2890,7 @@ void MarkCompactCollector::EvacuatePrologue() {
new_space_evacuation_pages_.push_back(p);
}
new_space->Flip();
- new_space->ResetAllocationInfo();
+ new_space->ResetLinearAllocationArea();
// Old space.
DCHECK(old_space_evacuation_pages_.empty());
@@ -2998,6 +2974,8 @@ class Evacuator : public Malloced {
// to be called from the main thread.
inline void Finalize();
+ virtual GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() = 0;
+
protected:
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
@@ -3077,6 +3055,10 @@ class FullEvacuator : public Evacuator {
RecordMigratedSlotVisitor* record_visitor)
: Evacuator(collector->heap(), record_visitor), collector_(collector) {}
+ GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
+ return GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY;
+ }
+
protected:
void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
@@ -3133,6 +3115,10 @@ class YoungGenerationEvacuator : public Evacuator {
RecordMigratedSlotVisitor* record_visitor)
: Evacuator(collector->heap(), record_visitor), collector_(collector) {}
+ GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
+ return GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
+ }
+
protected:
void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
@@ -3210,9 +3196,12 @@ class PageEvacuationItem : public ItemParallelJob::Item {
class PageEvacuationTask : public ItemParallelJob::Task {
public:
PageEvacuationTask(Isolate* isolate, Evacuator* evacuator)
- : ItemParallelJob::Task(isolate), evacuator_(evacuator) {}
+ : ItemParallelJob::Task(isolate),
+ evacuator_(evacuator),
+ tracer_(isolate->heap()->tracer()) {}
void RunInParallel() override {
+ TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope());
PageEvacuationItem* item = nullptr;
while ((item = GetItem<PageEvacuationItem>()) != nullptr) {
evacuator_->EvacuatePage(item->page());
@@ -3222,6 +3211,7 @@ class PageEvacuationTask : public ItemParallelJob::Task {
private:
Evacuator* evacuator_;
+ GCTracer* tracer_;
};
template <class Evacuator, class Collector>
@@ -3477,12 +3467,11 @@ void MarkCompactCollector::Evacuate() {
for (Page* p : new_space_evacuation_pages_) {
if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
- sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
+ sweeper()->AddPageForIterability(p);
} else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
- p->ForAllFreeListCategories(
- [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
- sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
+ DCHECK_EQ(OLD_SPACE, p->owner()->identity());
+ sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
}
}
new_space_evacuation_pages_.clear();
@@ -3521,16 +3510,24 @@ class UpdatingItem : public ItemParallelJob::Item {
class PointersUpdatingTask : public ItemParallelJob::Task {
public:
- explicit PointersUpdatingTask(Isolate* isolate)
- : ItemParallelJob::Task(isolate) {}
+ explicit PointersUpdatingTask(Isolate* isolate,
+ GCTracer::BackgroundScope::ScopeId scope)
+ : ItemParallelJob::Task(isolate),
+ tracer_(isolate->heap()->tracer()),
+ scope_(scope) {}
void RunInParallel() override {
+ TRACE_BACKGROUND_GC(tracer_, scope_);
UpdatingItem* item = nullptr;
while ((item = GetItem<UpdatingItem>()) != nullptr) {
item->Process();
item->MarkFinished();
}
};
+
+ private:
+ GCTracer* tracer_;
+ GCTracer::BackgroundScope::ScopeId scope_;
};
template <typename MarkingState>
@@ -3921,7 +3918,9 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpdatingTask(isolate()));
+ updating_job.AddTask(new PointersUpdatingTask(
+ isolate(),
+ GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
updating_job.Run();
}
@@ -3951,7 +3950,9 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
const int num_tasks = Max(array_buffer_pages, remembered_set_tasks);
if (num_tasks > 0) {
for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpdatingTask(isolate()));
+ updating_job.AddTask(new PointersUpdatingTask(
+ isolate(),
+ GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
updating_job.Run();
heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
@@ -3996,11 +3997,15 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
remembered_set_pages += CollectRememberedSetUpdatingItems(
&updating_job, heap()->lo_space(),
RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- const int remembered_set_tasks = NumberOfParallelPointerUpdateTasks(
- remembered_set_pages, old_to_new_slots_);
+ const int remembered_set_tasks =
+ remembered_set_pages == 0 ? 0
+ : NumberOfParallelPointerUpdateTasks(
+ remembered_set_pages, old_to_new_slots_);
const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpdatingTask(isolate()));
+ updating_job.AddTask(new PointersUpdatingTask(
+ isolate(), GCTracer::BackgroundScope::
+ MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
{
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index a68be9b241..6fda00633c 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -723,8 +723,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
int* target_fragmentation_percent,
size_t* max_evacuated_bytes);
- void VisitAllObjects(HeapObjectVisitor* visitor);
-
void RecordObjectStats();
// Finishes GC, performs heap verification if enabled.
@@ -751,13 +749,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// the string table are weak.
void MarkStringTable(ObjectVisitor* visitor);
- // Mark objects reachable (transitively) from objects in the marking stack
- // or overflowed in the heap. This respects references only considered in
- // the final atomic marking pause including the following:
- // - Processing of objects reachable through Harmony WeakMaps.
- // - Objects reachable due to host application logic like object groups,
- // implicit references' groups, or embedder heap tracing.
- void ProcessEphemeralMarking(bool only_process_harmony_weak_collections);
+ // Marks objects reachable from harmony weak maps and wrapper tracing.
+ void ProcessEphemeralMarking();
// If the call-site of the top optimized code was not prepared for
// deoptimization, then treat embedded pointers in the code as strong as
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index d8fe9fe7d8..f58a472671 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -4,6 +4,8 @@
#include "src/heap/object-stats.h"
+#include <unordered_set>
+
#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/compilation-cache.h"
@@ -19,7 +21,6 @@ namespace internal {
static base::LazyMutex object_stats_mutex = LAZY_MUTEX_INITIALIZER;
-
void ObjectStats::ClearObjectStats(bool clear_last_time_stats) {
memset(object_counts_, 0, sizeof(object_counts_));
memset(object_sizes_, 0, sizeof(object_sizes_));
@@ -104,16 +105,18 @@ void ObjectStats::PrintJSON(const char* key) {
#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
PrintInstanceTypeJSON(key, gc_count, "*FIXED_ARRAY_" #name, \
FIRST_FIXED_ARRAY_SUB_TYPE + name);
+#define VIRTUAL_INSTANCE_TYPE_WRAPPER(name) \
+ PrintInstanceTypeJSON(key, gc_count, #name, FIRST_VIRTUAL_TYPE + name);
INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER)
CODE_KIND_LIST(CODE_KIND_WRAPPER)
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER)
+ VIRTUAL_INSTANCE_TYPE_LIST(VIRTUAL_INSTANCE_TYPE_WRAPPER)
#undef INSTANCE_TYPE_WRAPPER
#undef CODE_KIND_WRAPPER
#undef FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER
-#undef PRINT_INSTANCE_TYPE_DATA
-#undef PRINT_KEY_AND_ID
+#undef VIRTUAL_INSTANCE_TYPE_WRAPPER
}
void ObjectStats::DumpInstanceTypeData(std::stringstream& stream,
@@ -154,58 +157,23 @@ void ObjectStats::Dump(std::stringstream& stream) {
#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
DumpInstanceTypeData(stream, "*FIXED_ARRAY_" #name, \
FIRST_FIXED_ARRAY_SUB_TYPE + name);
+#define VIRTUAL_INSTANCE_TYPE_WRAPPER(name) \
+ DumpInstanceTypeData(stream, #name, FIRST_VIRTUAL_TYPE + name);
INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER);
CODE_KIND_LIST(CODE_KIND_WRAPPER);
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER);
+ VIRTUAL_INSTANCE_TYPE_LIST(VIRTUAL_INSTANCE_TYPE_WRAPPER)
stream << "\"END\":{}}}";
#undef INSTANCE_TYPE_WRAPPER
#undef CODE_KIND_WRAPPER
#undef FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER
-#undef PRINT_INSTANCE_TYPE_DATA
+#undef VIRTUAL_INSTANCE_TYPE_WRAPPER
}
void ObjectStats::CheckpointObjectStats() {
base::LockGuard<base::Mutex> lock_guard(object_stats_mutex.Pointer());
- Counters* counters = isolate()->counters();
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
- counters->count_of_##name()->Increment( \
- static_cast<int>(object_counts_[name])); \
- counters->count_of_##name()->Decrement( \
- static_cast<int>(object_counts_last_time_[name])); \
- counters->size_of_##name()->Increment( \
- static_cast<int>(object_sizes_[name])); \
- counters->size_of_##name()->Decrement( \
- static_cast<int>(object_sizes_last_time_[name]));
- INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
- int index;
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
- index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
- counters->count_of_CODE_TYPE_##name()->Increment( \
- static_cast<int>(object_counts_[index])); \
- counters->count_of_CODE_TYPE_##name()->Decrement( \
- static_cast<int>(object_counts_last_time_[index])); \
- counters->size_of_CODE_TYPE_##name()->Increment( \
- static_cast<int>(object_sizes_[index])); \
- counters->size_of_CODE_TYPE_##name()->Decrement( \
- static_cast<int>(object_sizes_last_time_[index]));
- CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
- index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
- counters->count_of_FIXED_ARRAY_##name()->Increment( \
- static_cast<int>(object_counts_[index])); \
- counters->count_of_FIXED_ARRAY_##name()->Decrement( \
- static_cast<int>(object_counts_last_time_[index])); \
- counters->size_of_FIXED_ARRAY_##name()->Increment( \
- static_cast<int>(object_sizes_[index])); \
- counters->size_of_FIXED_ARRAY_##name()->Decrement( \
- static_cast<int>(object_sizes_last_time_[index]));
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
-
MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
ClearObjectStats();
@@ -233,6 +201,14 @@ void ObjectStats::RecordObjectStats(InstanceType type, size_t size) {
size_histogram_[type][HistogramIndexFromSize(size)]++;
}
+void ObjectStats::RecordVirtualObjectStats(VirtualInstanceType type,
+ size_t size) {
+ DCHECK_LE(type, LAST_VIRTUAL_TYPE);
+ object_counts_[FIRST_VIRTUAL_TYPE + type]++;
+ object_sizes_[FIRST_VIRTUAL_TYPE + type] += size;
+ size_histogram_[FIRST_VIRTUAL_TYPE + type][HistogramIndexFromSize(size)]++;
+}
+
void ObjectStats::RecordCodeSubTypeStats(int code_sub_type, size_t size) {
int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
DCHECK_GE(code_sub_type_index, FIRST_CODE_KIND_SUB_TYPE);
@@ -267,18 +243,117 @@ bool ObjectStats::RecordFixedArraySubTypeStats(FixedArrayBase* array,
Isolate* ObjectStats::isolate() { return heap()->isolate(); }
-ObjectStatsCollector::ObjectStatsCollector(Heap* heap, ObjectStats* stats)
+class ObjectStatsCollectorImpl {
+ public:
+ ObjectStatsCollectorImpl(Heap* heap, ObjectStats* stats);
+
+ void CollectGlobalStatistics();
+
+ // Collects statistics of objects for virtual instance types.
+ void CollectVirtualStatistics(HeapObject* obj);
+
+ // Collects statistics of objects for regular instance types.
+ void CollectStatistics(HeapObject* obj);
+
+ private:
+ class CompilationCacheTableVisitor;
+
+ void RecordObjectStats(HeapObject* obj, InstanceType type, size_t size);
+ void RecordBytecodeArrayDetails(BytecodeArray* obj);
+ void RecordCodeDetails(Code* code);
+ void RecordFixedArrayDetails(FixedArray* array);
+ void RecordJSCollectionDetails(JSObject* obj);
+ void RecordJSObjectDetails(JSObject* object);
+ void RecordJSWeakCollectionDetails(JSWeakCollection* obj);
+ void RecordMapDetails(Map* map);
+ void RecordScriptDetails(Script* obj);
+ void RecordTemplateInfoDetails(TemplateInfo* obj);
+ void RecordSharedFunctionInfoDetails(SharedFunctionInfo* sfi);
+
+ bool RecordFixedArrayHelper(HeapObject* parent, FixedArray* array,
+ int subtype, size_t overhead);
+ void RecursivelyRecordFixedArrayHelper(HeapObject* parent, FixedArray* array,
+ int subtype);
+ template <class HashTable>
+ void RecordHashTableHelper(HeapObject* parent, HashTable* array, int subtype);
+ bool SameLiveness(HeapObject* obj1, HeapObject* obj2);
+
+ void RecordVirtualObjectStats(HeapObject* obj,
+ ObjectStats::VirtualInstanceType type,
+ size_t size);
+ void RecordVirtualAllocationSiteDetails(AllocationSite* site);
+
+ Heap* heap_;
+ ObjectStats* stats_;
+ MarkCompactCollector::NonAtomicMarkingState* marking_state_;
+ std::unordered_set<HeapObject*> virtual_objects_;
+
+ friend class ObjectStatsCollectorImpl::CompilationCacheTableVisitor;
+};
+
+ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
+ ObjectStats* stats)
: heap_(heap),
stats_(stats),
marking_state_(
heap->mark_compact_collector()->non_atomic_marking_state()) {}
-void ObjectStatsCollector::CollectStatistics(HeapObject* obj) {
+// For entries which share the same instance type (historically FixedArrays)
+// we do a pre-pass and create virtual instance types.
+void ObjectStatsCollectorImpl::CollectVirtualStatistics(HeapObject* obj) {
+ if (obj->IsAllocationSite()) {
+ RecordVirtualAllocationSiteDetails(AllocationSite::cast(obj));
+ }
+}
+
+void ObjectStatsCollectorImpl::RecordVirtualObjectStats(
+ HeapObject* obj, ObjectStats::VirtualInstanceType type, size_t size) {
+ virtual_objects_.insert(obj);
+ stats_->RecordVirtualObjectStats(type, size);
+}
+
+void ObjectStatsCollectorImpl::RecordVirtualAllocationSiteDetails(
+ AllocationSite* site) {
+ if (!site->PointsToLiteral()) return;
+ JSObject* boilerplate = site->boilerplate();
+ if (boilerplate->IsJSArray()) {
+ RecordVirtualObjectStats(boilerplate,
+ ObjectStats::JS_ARRAY_BOILERPLATE_TYPE,
+ boilerplate->Size());
+ // Array boilerplates cannot have properties.
+ } else {
+ RecordVirtualObjectStats(boilerplate,
+ ObjectStats::JS_OBJECT_BOILERPLATE_TYPE,
+ boilerplate->Size());
+ if (boilerplate->HasFastProperties()) {
+ // We'll misclassify the empty_property_array here. Given that there is a
+ // single instance, this is negligible.
+ PropertyArray* properties = boilerplate->property_array();
+ RecordVirtualObjectStats(properties,
+ ObjectStats::BOILERPLATE_PROPERTY_ARRAY_TYPE,
+ properties->Size());
+ } else {
+ NameDictionary* properties = boilerplate->property_dictionary();
+ RecordVirtualObjectStats(properties,
+ ObjectStats::BOILERPLATE_NAME_DICTIONARY_TYPE,
+ properties->Size());
+ }
+ }
+ FixedArrayBase* elements = boilerplate->elements();
+ // We skip COW elements since they are shared, and we are sure that if the
+ // boilerplate exists there must have been at least one instantiation.
+ if (!elements->IsCowArray()) {
+ RecordVirtualObjectStats(elements, ObjectStats::BOILERPLATE_ELEMENTS_TYPE,
+ elements->Size());
+ }
+}
+
+void ObjectStatsCollectorImpl::CollectStatistics(HeapObject* obj) {
Map* map = obj->map();
// Record for the InstanceType.
int object_size = obj->Size();
- stats_->RecordObjectStats(map->instance_type(), object_size);
+ RecordObjectStats(obj, map->instance_type(), object_size);
// Record specific sub types where possible.
if (obj->IsMap()) RecordMapDetails(Map::cast(obj));
@@ -303,9 +378,10 @@ void ObjectStatsCollector::CollectStatistics(HeapObject* obj) {
if (obj->IsScript()) RecordScriptDetails(Script::cast(obj));
}
-class ObjectStatsCollector::CompilationCacheTableVisitor : public RootVisitor {
+class ObjectStatsCollectorImpl::CompilationCacheTableVisitor
+ : public RootVisitor {
public:
- explicit CompilationCacheTableVisitor(ObjectStatsCollector* parent)
+ explicit CompilationCacheTableVisitor(ObjectStatsCollectorImpl* parent)
: parent_(parent) {}
void VisitRootPointers(Root root, Object** start, Object** end) override {
@@ -319,15 +395,15 @@ class ObjectStatsCollector::CompilationCacheTableVisitor : public RootVisitor {
}
private:
- ObjectStatsCollector* parent_;
+ ObjectStatsCollectorImpl* parent_;
};
-void ObjectStatsCollector::CollectGlobalStatistics() {
+void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
// Global FixedArrays.
RecordFixedArrayHelper(nullptr, heap_->weak_new_space_object_to_code_list(),
WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->serialized_templates(),
- SERIALIZED_TEMPLATES_SUB_TYPE, 0);
+ RecordFixedArrayHelper(nullptr, heap_->serialized_objects(),
+ SERIALIZED_OBJECTS_SUB_TYPE, 0);
RecordFixedArrayHelper(nullptr, heap_->number_string_cache(),
NUMBER_STRING_CACHE_SUB_TYPE, 0);
RecordFixedArrayHelper(nullptr, heap_->single_character_string_cache(),
@@ -359,6 +435,13 @@ void ObjectStatsCollector::CollectGlobalStatistics() {
compilation_cache->Iterate(&v);
}
+void ObjectStatsCollectorImpl::RecordObjectStats(HeapObject* obj,
+ InstanceType type,
+ size_t size) {
+ if (virtual_objects_.find(obj) == virtual_objects_.end())
+ stats_->RecordObjectStats(type, size);
+}
+
static bool CanRecordFixedArray(Heap* heap, FixedArrayBase* array) {
return array->map()->instance_type() == FIXED_ARRAY_TYPE &&
array != heap->empty_fixed_array() &&
@@ -371,15 +454,16 @@ static bool IsCowArray(Heap* heap, FixedArrayBase* array) {
return array->map() == heap->fixed_cow_array_map();
}
-bool ObjectStatsCollector::SameLiveness(HeapObject* obj1, HeapObject* obj2) {
+bool ObjectStatsCollectorImpl::SameLiveness(HeapObject* obj1,
+ HeapObject* obj2) {
return obj1 == nullptr || obj2 == nullptr ||
marking_state_->Color(obj1) == marking_state_->Color(obj2);
}
-bool ObjectStatsCollector::RecordFixedArrayHelper(HeapObject* parent,
- FixedArray* array,
- int subtype,
- size_t overhead) {
+bool ObjectStatsCollectorImpl::RecordFixedArrayHelper(HeapObject* parent,
+ FixedArray* array,
+ int subtype,
+ size_t overhead) {
if (SameLiveness(parent, array) && CanRecordFixedArray(heap_, array) &&
!IsCowArray(heap_, array)) {
return stats_->RecordFixedArraySubTypeStats(array, subtype, array->Size(),
@@ -388,9 +472,8 @@ bool ObjectStatsCollector::RecordFixedArrayHelper(HeapObject* parent,
return false;
}
-void ObjectStatsCollector::RecursivelyRecordFixedArrayHelper(HeapObject* parent,
- FixedArray* array,
- int subtype) {
+void ObjectStatsCollectorImpl::RecursivelyRecordFixedArrayHelper(
+ HeapObject* parent, FixedArray* array, int subtype) {
if (RecordFixedArrayHelper(parent, array, subtype, 0)) {
for (int i = 0; i < array->length(); i++) {
if (array->get(i)->IsFixedArray()) {
@@ -402,9 +485,9 @@ void ObjectStatsCollector::RecursivelyRecordFixedArrayHelper(HeapObject* parent,
}
template <class HashTable>
-void ObjectStatsCollector::RecordHashTableHelper(HeapObject* parent,
- HashTable* array,
- int subtype) {
+void ObjectStatsCollectorImpl::RecordHashTableHelper(HeapObject* parent,
+ HashTable* array,
+ int subtype) {
int used = array->NumberOfElements() * HashTable::kEntrySize * kPointerSize;
CHECK_GE(array->Size(), used);
size_t overhead = array->Size() - used -
@@ -413,7 +496,7 @@ void ObjectStatsCollector::RecordHashTableHelper(HeapObject* parent,
RecordFixedArrayHelper(parent, array, subtype, overhead);
}
-void ObjectStatsCollector::RecordJSObjectDetails(JSObject* object) {
+void ObjectStatsCollectorImpl::RecordJSObjectDetails(JSObject* object) {
size_t overhead = 0;
FixedArrayBase* elements = object->elements();
if (CanRecordFixedArray(heap_, elements) && !IsCowArray(heap_, elements)) {
@@ -448,7 +531,7 @@ void ObjectStatsCollector::RecordJSObjectDetails(JSObject* object) {
}
}
-void ObjectStatsCollector::RecordJSWeakCollectionDetails(
+void ObjectStatsCollectorImpl::RecordJSWeakCollectionDetails(
JSWeakCollection* obj) {
if (obj->table()->IsHashTable()) {
ObjectHashTable* table = ObjectHashTable::cast(obj->table());
@@ -458,7 +541,7 @@ void ObjectStatsCollector::RecordJSWeakCollectionDetails(
}
}
-void ObjectStatsCollector::RecordJSCollectionDetails(JSObject* obj) {
+void ObjectStatsCollectorImpl::RecordJSCollectionDetails(JSObject* obj) {
// The JS versions use a different HashTable implementation that cannot use
// the regular helper. Since overall impact is usually small just record
// without overhead.
@@ -472,12 +555,12 @@ void ObjectStatsCollector::RecordJSCollectionDetails(JSObject* obj) {
}
}
-void ObjectStatsCollector::RecordScriptDetails(Script* obj) {
+void ObjectStatsCollectorImpl::RecordScriptDetails(Script* obj) {
FixedArray* infos = FixedArray::cast(obj->shared_function_infos());
RecordFixedArrayHelper(obj, infos, SHARED_FUNCTION_INFOS_SUB_TYPE, 0);
}
-void ObjectStatsCollector::RecordMapDetails(Map* map_obj) {
+void ObjectStatsCollectorImpl::RecordMapDetails(Map* map_obj) {
DescriptorArray* array = map_obj->instance_descriptors();
if (map_obj->owns_descriptors() && array != heap_->empty_descriptor_array() &&
SameLiveness(map_obj, array)) {
@@ -508,7 +591,7 @@ void ObjectStatsCollector::RecordMapDetails(Map* map_obj) {
}
}
-void ObjectStatsCollector::RecordTemplateInfoDetails(TemplateInfo* obj) {
+void ObjectStatsCollectorImpl::RecordTemplateInfoDetails(TemplateInfo* obj) {
if (obj->property_accessors()->IsFixedArray()) {
RecordFixedArrayHelper(obj, FixedArray::cast(obj->property_accessors()),
TEMPLATE_INFO_SUB_TYPE, 0);
@@ -519,14 +602,14 @@ void ObjectStatsCollector::RecordTemplateInfoDetails(TemplateInfo* obj) {
}
}
-void ObjectStatsCollector::RecordBytecodeArrayDetails(BytecodeArray* obj) {
+void ObjectStatsCollectorImpl::RecordBytecodeArrayDetails(BytecodeArray* obj) {
RecordFixedArrayHelper(obj, obj->constant_pool(),
BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE, 0);
RecordFixedArrayHelper(obj, obj->handler_table(),
BYTECODE_ARRAY_HANDLER_TABLE_SUB_TYPE, 0);
}
-void ObjectStatsCollector::RecordCodeDetails(Code* code) {
+void ObjectStatsCollectorImpl::RecordCodeDetails(Code* code) {
stats_->RecordCodeSubTypeStats(code->kind(), code->Size());
RecordFixedArrayHelper(code, code->deoptimization_data(),
DEOPTIMIZATION_DATA_SUB_TYPE, 0);
@@ -554,7 +637,7 @@ void ObjectStatsCollector::RecordCodeDetails(Code* code) {
}
}
-void ObjectStatsCollector::RecordSharedFunctionInfoDetails(
+void ObjectStatsCollectorImpl::RecordSharedFunctionInfoDetails(
SharedFunctionInfo* sfi) {
FixedArray* scope_info = sfi->scope_info();
RecordFixedArrayHelper(sfi, scope_info, SCOPE_INFO_SUB_TYPE, 0);
@@ -565,7 +648,7 @@ void ObjectStatsCollector::RecordSharedFunctionInfoDetails(
}
}
-void ObjectStatsCollector::RecordFixedArrayDetails(FixedArray* array) {
+void ObjectStatsCollectorImpl::RecordFixedArrayDetails(FixedArray* array) {
if (array->IsContext()) {
RecordFixedArrayHelper(nullptr, array, CONTEXT_SUB_TYPE, 0);
}
@@ -585,5 +668,85 @@ void ObjectStatsCollector::RecordFixedArrayDetails(FixedArray* array) {
}
}
+class ObjectStatsVisitor {
+ public:
+ enum CollectionMode {
+ kRegular,
+ kVirtual,
+ };
+
+ ObjectStatsVisitor(Heap* heap, ObjectStatsCollectorImpl* live_collector,
+ ObjectStatsCollectorImpl* dead_collector,
+ CollectionMode mode)
+ : live_collector_(live_collector),
+ dead_collector_(dead_collector),
+ marking_state_(
+ heap->mark_compact_collector()->non_atomic_marking_state()),
+ mode_(mode) {}
+
+ bool Visit(HeapObject* obj, int size) {
+ if (marking_state_->IsBlack(obj)) {
+ Collect(live_collector_, obj);
+ } else {
+ DCHECK(!marking_state_->IsGrey(obj));
+ Collect(dead_collector_, obj);
+ }
+ return true;
+ }
+
+ private:
+ void Collect(ObjectStatsCollectorImpl* collector, HeapObject* obj) {
+ switch (mode_) {
+ case kRegular:
+ collector->CollectStatistics(obj);
+ break;
+ case kVirtual:
+ collector->CollectVirtualStatistics(obj);
+ break;
+ }
+ }
+
+ ObjectStatsCollectorImpl* live_collector_;
+ ObjectStatsCollectorImpl* dead_collector_;
+ MarkCompactCollector::NonAtomicMarkingState* marking_state_;
+ CollectionMode mode_;
+};
+
+namespace {
+
+void IterateHeap(Heap* heap, ObjectStatsVisitor* visitor) {
+ SpaceIterator space_it(heap);
+ HeapObject* obj = nullptr;
+ while (space_it.has_next()) {
+ std::unique_ptr<ObjectIterator> it(space_it.next()->GetObjectIterator());
+ ObjectIterator* obj_it = it.get();
+ while ((obj = obj_it->Next()) != nullptr) {
+ visitor->Visit(obj, obj->Size());
+ }
+ }
+}
+
+} // namespace
+
+void ObjectStatsCollector::Collect() {
+ ObjectStatsCollectorImpl live_collector(heap_, live_);
+ ObjectStatsCollectorImpl dead_collector(heap_, dead_);
+ // 1. Collect system types otherwise indistinguishable from other types.
+ {
+ ObjectStatsVisitor visitor(heap_, &live_collector, &dead_collector,
+ ObjectStatsVisitor::kVirtual);
+ IterateHeap(heap_, &visitor);
+ }
+
+ // 2. Collect globals; only applies to live objects.
+ live_collector.CollectGlobalStatistics();
+ // 3. Collect rest.
+ {
+ ObjectStatsVisitor visitor(heap_, &live_collector, &dead_collector,
+ ObjectStatsVisitor::kRegular);
+ IterateHeap(heap_, &visitor);
+ }
+}
+
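// A hedged, stand-alone sketch of the driver above: the heap is walked once
// per phase and every object is routed to the live or dead collector based
// on its mark bit. FakeObject and the Collector method names are
// illustrative stand-ins, not V8 API.
#include <vector>

struct FakeObject { bool marked_black; };

template <typename Collector>
void CollectTwoPhase(const std::vector<FakeObject*>& heap_objects,
                     Collector* live, Collector* dead) {
  auto route = [&](void (Collector::*method)(FakeObject*)) {
    for (FakeObject* obj : heap_objects) {
      Collector* target = obj->marked_black ? live : dead;
      (target->*method)(obj);
    }
  };
  route(&Collector::CollectVirtualStatistics);  // phase 1: virtual types
  live->CollectGlobalStatistics();              // globals, live objects only
  route(&Collector::CollectStatistics);         // phase 2: everything else
}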
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index 18bbaaaa43..500ce36bd9 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -13,6 +13,19 @@
#include "src/heap/objects-visiting.h"
#include "src/objects.h"
+// These instance types do not exist for actual use but are merely introduced
+// for object stats tracing. In contrast to Code and FixedArray sub types
+// these types are not known to other counters outside of object stats
+// tracing.
+//
+// Update LAST_VIRTUAL_TYPE below when changing this macro.
+#define VIRTUAL_INSTANCE_TYPE_LIST(V) \
+ V(BOILERPLATE_ELEMENTS_TYPE) \
+ V(BOILERPLATE_NAME_DICTIONARY_TYPE) \
+ V(BOILERPLATE_PROPERTY_ARRAY_TYPE) \
+ V(JS_ARRAY_BOILERPLATE_TYPE) \
+ V(JS_OBJECT_BOILERPLATE_TYPE)
+
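// A stand-alone illustration (toy list, not V8's) of the X-macro pattern the
// list above relies on: the same list expands once into enumerators and once
// into printable names, so the two can never drift apart.
#define TOY_TYPE_LIST(V) \
  V(FOO_TYPE)            \
  V(BAR_TYPE)

enum ToyType {
#define DEFINE_TOY_TYPE(type) type,
  TOY_TYPE_LIST(DEFINE_TOY_TYPE)
#undef DEFINE_TOY_TYPE
};

static const char* const kToyTypeNames[] = {
#define TOY_TYPE_NAME(type) #type,
    TOY_TYPE_LIST(TOY_TYPE_NAME)
#undef TOY_TYPE_NAME
};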
namespace v8 {
namespace internal {
@@ -20,6 +33,14 @@ class ObjectStats {
public:
explicit ObjectStats(Heap* heap) : heap_(heap) { ClearObjectStats(); }
+ // See description on VIRTUAL_INSTANCE_TYPE_LIST.
+ enum VirtualInstanceType {
+#define DEFINE_VIRTUAL_INSTANCE_TYPE(type) type,
+ VIRTUAL_INSTANCE_TYPE_LIST(DEFINE_VIRTUAL_INSTANCE_TYPE)
+#undef DEFINE_VIRTUAL_INSTANCE_TYPE
+ LAST_VIRTUAL_TYPE = JS_OBJECT_BOILERPLATE_TYPE,
+ };
+
// ObjectStats are kept in two arrays, counts and sizes. Related stats are
// stored in a contiguous linear buffer. Stats groups are stored one after
// another.
@@ -27,17 +48,19 @@ class ObjectStats {
FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
FIRST_FIXED_ARRAY_SUB_TYPE =
FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
- OBJECT_STATS_COUNT =
+ FIRST_VIRTUAL_TYPE =
FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
+ OBJECT_STATS_COUNT = FIRST_VIRTUAL_TYPE + LAST_VIRTUAL_TYPE + 1,
};
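// An illustrative mock-up (all constants invented) of how the counter index
// space above is laid out: instance types first, then code kinds, then
// fixed-array sub types, then the virtual types, with OBJECT_STATS_COUNT as
// the total length of the counts/sizes buffers.
enum MockBuckets {
  kLastInstanceType = 10,       // stands in for LAST_TYPE
  kNumberOfCodeKinds = 4,       // stands in for Code::NUMBER_OF_KINDS
  kLastFixedArraySubType = 6,   // stands in for LAST_FIXED_ARRAY_SUB_TYPE
  kLastVirtualType = 2,         // stands in for LAST_VIRTUAL_TYPE

  kFirstCodeKindSubType = kLastInstanceType + 1,
  kFirstFixedArraySubType = kFirstCodeKindSubType + kNumberOfCodeKinds,
  kFirstVirtualType = kFirstFixedArraySubType + kLastFixedArraySubType + 1,
  kObjectStatsCount = kFirstVirtualType + kLastVirtualType + 1,
};
static_assert(kObjectStatsCount == 11 + 4 + 7 + 3,
              "buckets: instance types + code kinds + fixed-array + virtual");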
void ClearObjectStats(bool clear_last_time_stats = false);
- void CheckpointObjectStats();
void PrintJSON(const char* key);
void Dump(std::stringstream& stream);
+ void CheckpointObjectStats();
void RecordObjectStats(InstanceType type, size_t size);
+ void RecordVirtualObjectStats(VirtualInstanceType type, size_t size);
void RecordCodeSubTypeStats(int code_sub_type, size_t size);
bool RecordFixedArraySubTypeStats(FixedArrayBase* array, int array_sub_type,
size_t size, size_t over_allocated);
@@ -88,37 +111,21 @@ class ObjectStats {
class ObjectStatsCollector {
public:
- ObjectStatsCollector(Heap* heap, ObjectStats* stats);
+ ObjectStatsCollector(Heap* heap, ObjectStats* live, ObjectStats* dead)
+ : heap_(heap), live_(live), dead_(dead) {
+ DCHECK_NOT_NULL(heap_);
+ DCHECK_NOT_NULL(live_);
+ DCHECK_NOT_NULL(dead_);
+ }
- void CollectGlobalStatistics();
- void CollectStatistics(HeapObject* obj);
+ // Collects type information of live and dead objects. Requires mark bits to
+ // be present.
+ void Collect();
private:
- class CompilationCacheTableVisitor;
-
- void RecordBytecodeArrayDetails(BytecodeArray* obj);
- void RecordCodeDetails(Code* code);
- void RecordFixedArrayDetails(FixedArray* array);
- void RecordJSCollectionDetails(JSObject* obj);
- void RecordJSObjectDetails(JSObject* object);
- void RecordJSWeakCollectionDetails(JSWeakCollection* obj);
- void RecordMapDetails(Map* map);
- void RecordScriptDetails(Script* obj);
- void RecordTemplateInfoDetails(TemplateInfo* obj);
- void RecordSharedFunctionInfoDetails(SharedFunctionInfo* sfi);
-
- bool RecordFixedArrayHelper(HeapObject* parent, FixedArray* array,
- int subtype, size_t overhead);
- void RecursivelyRecordFixedArrayHelper(HeapObject* parent, FixedArray* array,
- int subtype);
- template <class HashTable>
- void RecordHashTableHelper(HeapObject* parent, HashTable* array, int subtype);
- bool SameLiveness(HeapObject* obj1, HeapObject* obj2);
- Heap* heap_;
- ObjectStats* stats_;
- MarkCompactCollector::NonAtomicMarkingState* marking_state_;
-
- friend class ObjectStatsCollector::CompilationCacheTableVisitor;
+ Heap* const heap_;
+ ObjectStats* const live_;
+ ObjectStats* const dead_;
};
} // namespace internal
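// With the interface above, the caller owns both stats buffers; a hedged
// usage sketch (the call site and mark-bit setup are assumptions, they are
// not part of this hunk):
//
//   ObjectStats live_stats(heap);
//   ObjectStats dead_stats(heap);
//   ObjectStatsCollector collector(heap, &live_stats, &dead_stats);
//   collector.Collect();  // requires valid mark bits, e.g. during full GC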
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 39ebdd2cbd..c20434a283 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -20,6 +20,7 @@ class BigInt;
class BytecodeArray;
class JSArrayBuffer;
class JSRegExp;
+class JSWeakCollection;
#define TYPED_VISITOR_ID_LIST(V) \
V(AllocationSite) \
diff --git a/deps/v8/src/heap/scavenge-job.cc b/deps/v8/src/heap/scavenge-job.cc
index a7583cb754..b649c010ae 100644
--- a/deps/v8/src/heap/scavenge-job.cc
+++ b/deps/v8/src/heap/scavenge-job.cc
@@ -103,7 +103,7 @@ void ScavengeJob::ScheduleIdleTaskIfNeeded(Heap* heap, int bytes_allocated) {
void ScavengeJob::ScheduleIdleTask(Heap* heap) {
- if (!idle_task_pending_) {
+ if (!idle_task_pending_ && heap->use_tasks()) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
if (V8::GetCurrentPlatform()->IdleTasksEnabled(isolate)) {
idle_task_pending_ = true;
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 99e1a8004e..b61872074e 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -58,8 +58,6 @@ bool Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
}
if (V8_UNLIKELY(is_logging_)) {
- // Update NewSpace stats if necessary.
- RecordCopiedObject(target);
heap()->OnMoveEvent(target, source, size);
}
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 231a8f5074..be5fb87a90 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -153,20 +153,6 @@ void Scavenger::Process(OneshotBarrier* barrier) {
} while (!done);
}
-void Scavenger::RecordCopiedObject(HeapObject* obj) {
- bool should_record = FLAG_log_gc;
-#ifdef DEBUG
- should_record = FLAG_heap_stats;
-#endif
- if (should_record) {
- if (heap()->new_space()->Contains(obj)) {
- heap()->new_space()->RecordAllocation(obj);
- } else {
- heap()->new_space()->RecordPromotion(obj);
- }
- }
-}
-
void Scavenger::Finalize() {
heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
heap()->IncrementSemiSpaceCopiedObjectSize(copied_size_);
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 75b24fe282..27ae2e8ab7 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -92,8 +92,6 @@ class Scavenger {
void IterateAndScavengePromotedObject(HeapObject* target, int size);
- void RecordCopiedObject(HeapObject* obj);
-
static inline bool ContainsOnlyData(VisitorId visitor_id);
Heap* const heap_;
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 8831417ce2..9e2d7e6354 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -17,6 +17,7 @@
#include "src/lookup-cache.h"
#include "src/objects-inl.h"
#include "src/objects/arguments.h"
+#include "src/objects/data-handler.h"
#include "src/objects/debug-objects.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/dictionary.h"
@@ -69,6 +70,11 @@ const Heap::StructTable Heap::struct_table[] = {
{NAME##_TYPE, Name::kSize, k##Name##MapRootIndex},
STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
+
+#define DATA_HANDLER_ELEMENT(NAME, Name, Size, name) \
+ {NAME##_TYPE, Name::kSizeWithData##Size, k##Name##Size##MapRootIndex},
+ DATA_HANDLER_LIST(DATA_HANDLER_ELEMENT)
+#undef DATA_HANDLER_ELEMENT
};
namespace {
@@ -188,9 +194,9 @@ bool Heap::CreateInitialMaps() {
FinalizePartialMap(this, fixed_cow_array_map());
FinalizePartialMap(this, descriptor_array_map());
FinalizePartialMap(this, undefined_map());
- undefined_map()->set_is_undetectable();
+ undefined_map()->set_is_undetectable(true);
FinalizePartialMap(this, null_map());
- null_map()->set_is_undetectable();
+ null_map()->set_is_undetectable(true);
FinalizePartialMap(this, the_hole_map());
for (unsigned i = 0; i < arraysize(struct_table); ++i) {
const StructTable& entry = struct_table[i];
@@ -300,6 +306,8 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, string_table)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, weak_hash_table)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, array_list)
+
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
@@ -551,9 +559,7 @@ void Heap::CreateInitialObjects() {
set_weak_object_to_code_table(*WeakHashTable::New(isolate(), 16, TENURED));
- set_weak_new_space_object_to_code_list(
- ArrayList::cast(*(factory->NewFixedArray(16, TENURED))));
- weak_new_space_object_to_code_list()->SetLength(0);
+ set_weak_new_space_object_to_code_list(*ArrayList::New(isolate(), 16));
set_feedback_vectors_for_profiling_tools(undefined_value());
@@ -632,7 +638,7 @@ void Heap::CreateInitialObjects() {
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_array_buffer_neutering_protector(*cell);
- set_serialized_templates(empty_fixed_array());
+ set_serialized_objects(empty_fixed_array());
set_serialized_global_proxy_sizes(empty_fixed_array());
set_weak_stack_trace_list(Smi::kZero);
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index fb78b99c2f..39a62327df 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -137,21 +137,20 @@ bool NewSpace::FromSpaceContainsSlow(Address a) {
bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
-void Page::InitializeFreeListCategories() {
+void MemoryChunk::InitializeFreeListCategories() {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
}
}
bool PagedSpace::Contains(Address addr) {
+ if (heap_->lo_space()->FindPage(addr)) return false;
return MemoryChunk::FromAnyPointerAddress(heap(), addr)->owner() == this;
}
bool PagedSpace::Contains(Object* o) {
if (!o->IsHeapObject()) return false;
- Page* p = Page::FromAddress(HeapObject::cast(o)->address());
- if (!Page::IsValid(p)) return false;
- return p->owner() == this;
+ return Page::FromAddress(HeapObject::cast(o)->address())->owner() == this;
}
void PagedSpace::UnlinkFreeListCategories(Page* page) {
@@ -186,18 +185,13 @@ bool PagedSpace::TryFreeLast(HeapObject* object, int object_size) {
}
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
- uintptr_t offset = addr - chunk->address();
- if (offset < MemoryChunk::kHeaderSize || !chunk->HasPageHeader()) {
- chunk = heap->lo_space()->FindPageThreadSafe(addr);
+ MemoryChunk* chunk = heap->lo_space()->FindPage(addr);
+ if (chunk == nullptr) {
+ chunk = MemoryChunk::FromAddress(addr);
}
return chunk;
}
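// A self-contained sketch (simplified types and constants, not V8 code) of
// the lookup order above: large-object pages are checked first, because a
// large object can span several chunk-alignment units and masking an
// interior pointer would not find its header; only then is the chunk derived
// from the address. The same reasoning adds the lo_space check to
// PagedSpace::Contains above.
#include <cstdint>

struct FakeChunk { /* chunk header */ };

constexpr uintptr_t kChunkSize = uintptr_t{1} << 19;  // 512 KB, illustrative

FakeChunk* FindLargePage(uintptr_t addr) {
  // Stand-in for LargeObjectSpace::FindPage(): a real implementation would
  // search the large-object page list for a page containing addr.
  (void)addr;
  return nullptr;
}

FakeChunk* ChunkFromAnyPointer(uintptr_t addr) {
  if (FakeChunk* large = FindLargePage(addr)) return large;  // large pages first
  return reinterpret_cast<FakeChunk*>(addr & ~(kChunkSize - 1));
}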
-Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) {
- return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr));
-}
-
void Page::MarkNeverAllocateForTesting() {
DCHECK(this->owner()->identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
@@ -301,8 +295,7 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes) {
if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit())
return true;
- if (free_list_.Allocate(size_in_bytes)) return true;
- return SlowAllocateRaw(size_in_bytes);
+ return SlowRefillLinearAllocationArea(size_in_bytes);
}
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
@@ -393,11 +386,11 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
#endif
HeapObject* heap_obj = nullptr;
if (!result.IsRetry() && result.To(&heap_obj) && !is_local()) {
- AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
- heap_obj->address(), size_in_bytes);
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
+ AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
+ heap_obj->address(), size_in_bytes);
StartNextInlineAllocationStep();
}
return result;
@@ -462,6 +455,12 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment) {
+ if (top() < top_on_previous_step_) {
+ // Generated code decreased the top() pointer to do folded allocations
+ DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
+ Page::FromAllocationAreaAddress(top_on_previous_step_));
+ top_on_previous_step_ = top();
+ }
#ifdef V8_HOST_ARCH_32_BIT
return alignment == kDoubleAligned
? AllocateRawAligned(size_in_bytes, kDoubleAligned)
@@ -484,7 +483,7 @@ size_t LargeObjectSpace::Available() {
LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
- return LocalAllocationBuffer(nullptr, AllocationInfo(nullptr, nullptr));
+ return LocalAllocationBuffer(nullptr, LinearAllocationArea(nullptr, nullptr));
}
@@ -497,7 +496,7 @@ LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
USE(ok);
DCHECK(ok);
Address top = HeapObject::cast(obj)->address();
- return LocalAllocationBuffer(heap, AllocationInfo(top, top + size));
+ return LocalAllocationBuffer(heap, LinearAllocationArea(top, top + size));
}
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index c3663573b0..2dd5e9b24d 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -12,6 +12,7 @@
#include "src/counters.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/concurrent-marking.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/slot-set.h"
@@ -57,8 +58,7 @@ bool HeapObjectIterator::AdvanceToNextPage() {
Page* cur_page = *(current_page_++);
Heap* heap = space_->heap();
- heap->mark_compact_collector()->sweeper()->SweepOrWaitUntilSweepingCompleted(
- cur_page);
+ heap->mark_compact_collector()->sweeper()->EnsurePageIsIterable(cur_page);
if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
heap->minor_mark_compact_collector()->MakeIterable(
cur_page, MarkingTreatmentMode::CLEAR,
@@ -71,16 +71,14 @@ bool HeapObjectIterator::AdvanceToNextPage() {
PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
: heap_(heap) {
- AllSpaces spaces(heap_);
- for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
- space->PauseAllocationObservers();
+ for (SpaceIterator it(heap_); it.has_next();) {
+ it.next()->PauseAllocationObservers();
}
}
PauseAllocationObserversScope::~PauseAllocationObserversScope() {
- AllSpaces spaces(heap_);
- for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
- space->ResumeAllocationObservers();
+ for (SpaceIterator it(heap_); it.has_next();) {
+ it.next()->ResumeAllocationObservers();
}
}
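// A stand-alone toy illustration of the RAII pattern above: observers are
// paused for every space on construction and resumed on destruction, now
// driven by a single iterator loop. ToySpace is illustrative only.
#include <vector>

struct ToySpace {
  void PauseAllocationObservers() {}
  void ResumeAllocationObservers() {}
};

class PauseObserversScope {
 public:
  explicit PauseObserversScope(std::vector<ToySpace*>* spaces)
      : spaces_(spaces) {
    for (ToySpace* s : *spaces_) s->PauseAllocationObservers();
  }
  ~PauseObserversScope() {
    for (ToySpace* s : *spaces_) s->ResumeAllocationObservers();
  }

 private:
  std::vector<ToySpace*>* spaces_;
};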
@@ -120,8 +118,8 @@ bool CodeRange::SetUp(size_t requested) {
VirtualMemory reservation;
if (!AlignedAllocVirtualMemory(
- requested, Max(kCodeRangeAreaAlignment, base::OS::AllocatePageSize()),
- base::OS::GetRandomMmapAddr(), &reservation)) {
+ requested, Max(kCodeRangeAreaAlignment, AllocatePageSize()),
+ GetRandomMmapAddr(), &reservation)) {
return false;
}
@@ -133,7 +131,7 @@ bool CodeRange::SetUp(size_t requested) {
// the beginning of an executable space.
if (reserved_area > 0) {
if (!reservation.SetPermissions(base, reserved_area,
- base::OS::MemoryPermission::kReadWrite))
+ PageAllocator::kReadWrite))
return false;
base += reserved_area;
@@ -228,7 +226,7 @@ bool CodeRange::CommitRawMemory(Address start, size_t length) {
bool CodeRange::UncommitRawMemory(Address start, size_t length) {
return virtual_memory_.SetPermissions(start, length,
- base::OS::MemoryPermission::kNoAccess);
+ PageAllocator::kNoAccess);
}
@@ -236,8 +234,7 @@ void CodeRange::FreeRawMemory(Address address, size_t length) {
DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
base::LockGuard<base::Mutex> guard(&code_range_mutex_);
free_list_.emplace_back(address, length);
- virtual_memory_.SetPermissions(address, length,
- base::OS::MemoryPermission::kNoAccess);
+ virtual_memory_.SetPermissions(address, length, PageAllocator::kNoAccess);
}
bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
@@ -316,20 +313,24 @@ void MemoryAllocator::TearDown() {
class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
public:
explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
- : CancelableTask(isolate), unmapper_(unmapper) {}
+ : CancelableTask(isolate),
+ unmapper_(unmapper),
+ tracer_(isolate->heap()->tracer()) {}
private:
void RunInternal() override {
+ TRACE_BACKGROUND_GC(tracer_,
+ GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
unmapper_->pending_unmapping_tasks_semaphore_.Signal();
}
Unmapper* const unmapper_;
+ GCTracer* const tracer_;
DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
};
void MemoryAllocator::Unmapper::FreeQueuedChunks() {
- ReconsiderDelayedChunks();
if (heap_->use_tasks() && FLAG_concurrent_sweeping) {
if (concurrent_unmapping_tasks_active_ >= kMaxUnmapperTasks) {
// kMaxUnmapperTasks are already running. Avoid creating any more.
@@ -380,23 +381,12 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
void MemoryAllocator::Unmapper::TearDown() {
CHECK_EQ(0, concurrent_unmapping_tasks_active_);
- ReconsiderDelayedChunks();
- CHECK(delayed_regular_chunks_.empty());
PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
for (int i = 0; i < kNumberOfChunkQueues; i++) {
DCHECK(chunks_[i].empty());
}
}
-void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
- std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
- // Move constructed, so the permanent list should be empty.
- DCHECK(delayed_regular_chunks_.empty());
- for (auto it = delayed_chunks.begin(); it != delayed_chunks.end(); ++it) {
- AddMemoryChunkSafe<kRegular>(*it);
- }
-}
-
int MemoryAllocator::Unmapper::NumberOfChunks() {
base::LockGuard<base::Mutex> guard(&mutex_);
size_t result = 0;
@@ -406,20 +396,9 @@ int MemoryAllocator::Unmapper::NumberOfChunks() {
return static_cast<int>(result);
}
-bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
- MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
- // We cannot free a memory chunk in new space while the sweeper is running
- // because the memory chunk can be in the queue of a sweeper task.
- // Chunks in old generation are unmapped if they are empty.
- DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
- return !chunk->InNewSpace() || mc == nullptr ||
- !mc->sweeper()->sweeping_in_progress();
-}
-
bool MemoryAllocator::CommitMemory(Address base, size_t size,
Executability executable) {
- if (!base::OS::SetPermissions(base, size,
- base::OS::MemoryPermission::kReadWrite)) {
+ if (!SetPermissions(base, size, PageAllocator::kReadWrite)) {
return false;
}
UpdateAllocatedSpaceLimits(base, base + size);
@@ -448,7 +427,7 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
code_range()->FreeRawMemory(base, size);
} else {
DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
- CHECK(base::OS::Free(base, size));
+ CHECK(FreePages(base, size));
}
}
@@ -481,7 +460,7 @@ Address MemoryAllocator::AllocateAlignedMemory(
}
} else {
if (reservation.SetPermissions(base, commit_size,
- base::OS::MemoryPermission::kReadWrite)) {
+ PageAllocator::kReadWrite)) {
UpdateAllocatedSpaceLimits(base, base + commit_size);
} else {
base = nullptr;
@@ -545,8 +524,8 @@ void MemoryChunk::SetReadAndExecutable() {
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAddressAligned(protect_start, page_size));
size_t protect_size = RoundUp(area_size(), page_size);
- CHECK(base::OS::SetPermissions(protect_start, protect_size,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(protect_start, protect_size,
+ PageAllocator::kReadExecute));
}
}
@@ -564,8 +543,8 @@ void MemoryChunk::SetReadAndWritable() {
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAddressAligned(unprotect_start, page_size));
size_t unprotect_size = RoundUp(area_size(), page_size);
- CHECK(base::OS::SetPermissions(unprotect_start, unprotect_size,
- base::OS::MemoryPermission::kReadWrite));
+ CHECK(SetPermissions(unprotect_start, unprotect_size,
+ PageAllocator::kReadWrite));
}
}
@@ -604,6 +583,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->set_next_chunk(nullptr);
chunk->set_prev_chunk(nullptr);
chunk->local_tracker_ = nullptr;
+ chunk->InitializeFreeListCategories();
heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(chunk);
@@ -618,9 +598,8 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAddressAligned(area_start, page_size));
size_t area_size = RoundUp(area_end - area_start, page_size);
- CHECK(base::OS::SetPermissions(
- area_start, area_size,
- base::OS::MemoryPermission::kReadWriteExecute));
+ CHECK(SetPermissions(area_start, area_size,
+ PageAllocator::kReadWriteExecute));
}
}
@@ -634,7 +613,6 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
Page* page = static_cast<Page*>(chunk);
DCHECK_GE(Page::kAllocatableMemory, page->area_size());
// Make sure that categories are initialized before freeing the area.
- page->InitializeFreeListCategories();
page->ResetAllocatedBytes();
heap()->incremental_marking()->SetOldSpacePageFlags(page);
page->InitializationMemoryFence();
@@ -866,6 +844,22 @@ size_t Page::AvailableInFreeList() {
return sum;
}
+#ifdef DEBUG
+namespace {
+// Skips filler starting from the given filler until the end address.
+// Returns the first address after the skipped fillers.
+Address SkipFillers(HeapObject* filler, Address end) {
+ Address addr = filler->address();
+ while (addr < end) {
+ filler = HeapObject::FromAddress(addr);
+ CHECK(filler->IsFiller());
+ addr = filler->address() + filler->Size();
+ }
+ return addr;
+}
+} // anonymous namespace
+#endif // DEBUG
+
size_t Page::ShrinkToHighWaterMark() {
// Shrinking only makes sense outside of the CodeRange, where we don't care
// about address space fragmentation.
@@ -877,29 +871,13 @@ size_t Page::ShrinkToHighWaterMark() {
HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
if (filler->address() == area_end()) return 0;
CHECK(filler->IsFiller());
- if (!filler->IsFreeSpace()) return 0;
-
-#ifdef DEBUG
- // Check the the filler is indeed the last filler on the page.
- HeapObjectIterator it(this);
- HeapObject* filler2 = nullptr;
- for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
- filler2 = HeapObject::FromAddress(obj->address() + obj->Size());
- }
- if (filler2 == nullptr || filler2->address() == area_end()) return 0;
- DCHECK(filler2->IsFiller());
- // The deserializer might leave behind fillers. In this case we need to
- // iterate even further.
- while ((filler2->address() + filler2->Size()) != area_end()) {
- filler2 = HeapObject::FromAddress(filler2->address() + filler2->Size());
- DCHECK(filler2->IsFiller());
- }
- DCHECK_EQ(filler->address(), filler2->address());
-#endif // DEBUG
+ // Ensure that no objects were allocated in the [filler, area_end) region.
+ DCHECK_EQ(area_end(), SkipFillers(filler, area_end()));
+ // Ensure that no objects will be allocated on this page.
+ DCHECK_EQ(0u, AvailableInFreeList());
- size_t unused = RoundDown(
- static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
- MemoryAllocator::GetCommitPageSize());
+ size_t unused = RoundDown(static_cast<size_t>(area_end() - filler->address()),
+ MemoryAllocator::GetCommitPageSize());
if (unused > 0) {
DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
if (FLAG_trace_gc_verbose) {
@@ -914,8 +892,10 @@ size_t Page::ShrinkToHighWaterMark() {
ClearRecordedSlots::kNo);
heap()->memory_allocator()->PartialFreeMemory(
this, address() + size() - unused, unused, area_end() - unused);
- CHECK(filler->IsFiller());
- CHECK_EQ(filler->address() + filler->Size(), area_end());
+ if (filler->address() != area_end()) {
+ CHECK(filler->IsFiller());
+ CHECK_EQ(filler->address() + filler->Size(), area_end());
+ }
}
return unused;
}
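// A stand-alone sketch of the shrink computation above: once free lists are
// evicted and the trailing filler is verified, everything from the filler up
// to area_end is unused and can be released, rounded down to whole commit
// pages. Values in the example are illustrative.
#include <cstddef>

size_t ComputeUnusedTail(size_t filler_start, size_t area_end,
                         size_t commit_page_size) {
  size_t tail = area_end - filler_start;
  return tail - (tail % commit_page_size);  // RoundDown to commit page size
}
// e.g. ComputeUnusedTail(0x12345, 0x20000, 0x1000) == 0xd000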
@@ -959,7 +939,7 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
DCHECK_EQ(chunk->address() + chunk->size(),
chunk->area_end() + CodePageGuardSize());
reservation->SetPermissions(chunk->area_end_, page_size,
- base::OS::MemoryPermission::kNoAccess);
+ PageAllocator::kNoAccess);
}
// On e.g. Windows, a reservation may be larger than a page and releasing
// partially starting at |start_free| will also release the potentially
@@ -1111,9 +1091,7 @@ bool MemoryAllocator::CommitBlock(Address start, size_t size,
bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
- if (!base::OS::SetPermissions(start, size,
- base::OS::MemoryPermission::kNoAccess))
- return false;
+ if (!SetPermissions(start, size, PageAllocator::kNoAccess)) return false;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
return true;
}
@@ -1121,19 +1099,10 @@ bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
void MemoryAllocator::ZapBlock(Address start, size_t size) {
for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
- Memory::Address_at(start + s) = kZapValue;
+ Memory::Address_at(start + s) = reinterpret_cast<Address>(kZapValue);
}
}
-#ifdef DEBUG
-void MemoryAllocator::ReportStatistics() {
- size_t size = Size();
- float pct = static_cast<float>(capacity_ - size) / capacity_;
- PrintF(" capacity: %zu , used: %" PRIuS ", available: %%%d\n\n",
- capacity_, size, static_cast<int>(pct * 100));
-}
-#endif
-
size_t MemoryAllocator::CodePageGuardStartOffset() {
// We are guarding code pages: the first OS page after the header
// will be protected as non-writable.
@@ -1159,7 +1128,7 @@ intptr_t MemoryAllocator::GetCommitPageSize() {
DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
return FLAG_v8_os_page_size * KB;
} else {
- return base::OS::CommitPageSize();
+ return CommitPageSize();
}
}
@@ -1180,26 +1149,23 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
const Address code_area = start + code_area_offset;
const Address post_guard_page = start + reserved_size - guard_size;
// Commit the non-executable header, from start to pre-code guard page.
- if (vm->SetPermissions(start, pre_guard_offset,
- base::OS::MemoryPermission::kReadWrite)) {
+ if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
// Create the pre-code guard page, following the header.
if (vm->SetPermissions(pre_guard_page, page_size,
- base::OS::MemoryPermission::kNoAccess)) {
+ PageAllocator::kNoAccess)) {
// Commit the executable code body.
if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
- base::OS::MemoryPermission::kReadWrite)) {
+ PageAllocator::kReadWrite)) {
// Create the post-code guard page.
if (vm->SetPermissions(post_guard_page, page_size,
- base::OS::MemoryPermission::kNoAccess)) {
+ PageAllocator::kNoAccess)) {
UpdateAllocatedSpaceLimits(start, code_area + commit_size);
return true;
}
- vm->SetPermissions(code_area, commit_size,
- base::OS::MemoryPermission::kNoAccess);
+ vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
}
}
- vm->SetPermissions(start, pre_guard_offset,
- base::OS::MemoryPermission::kNoAccess);
+ vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
}
return false;
}
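// A hedged sketch (stand-in types, simplified rollback) of the
// commit-with-rollback pattern above: header, pre-guard, code body and
// post-guard are committed in order, and on any failure the regions already
// committed are returned to no-access. SetPerm is a stub for the platform
// permission call.
#include <cstddef>
#include <cstdint>

enum class Perm { kNoAccess, kReadWrite };

bool SetPerm(uintptr_t start, size_t size, Perm p) {
  (void)start; (void)size; (void)p;
  return true;  // stand-in for an mprotect/VirtualAlloc-style call
}

bool CommitCodePages(uintptr_t start, size_t header_size, uintptr_t pre_guard,
                     uintptr_t code, size_t code_size, uintptr_t post_guard,
                     size_t page_size) {
  if (!SetPerm(start, header_size, Perm::kReadWrite)) return false;
  if (SetPerm(pre_guard, page_size, Perm::kNoAccess) &&
      SetPerm(code, code_size, Perm::kReadWrite) &&
      SetPerm(post_guard, page_size, Perm::kNoAccess)) {
    return true;
  }
  // Roll back so the whole reservation stays inaccessible on failure.
  SetPerm(code, code_size, Perm::kNoAccess);
  SetPerm(start, header_size, Perm::kNoAccess);
  return false;
}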
@@ -1379,7 +1345,7 @@ void Space::ResumeAllocationObservers() {
void Space::AllocationStep(int bytes_since_last, Address soon_object,
int size) {
- if (!allocation_observers_paused_) {
+ if (AllocationObserversActive()) {
heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
for (AllocationObserver* observer : allocation_observers_) {
observer->AllocationStep(bytes_since_last, soon_object, size);
@@ -1399,14 +1365,11 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable)
- : Space(heap, space, executable),
+ : SpaceWithLinearArea(heap, space, executable),
anchor_(this),
- free_list_(this),
- top_on_previous_step_(0) {
+ free_list_(this) {
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
-
- allocation_info_.Reset(nullptr, nullptr);
}
@@ -1469,7 +1432,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
// area_size_
// anchor_
- other->EmptyAllocationInfo();
+ other->FreeLinearAllocationArea();
// The linear allocation area of {other} should be destroyed now.
DCHECK_NULL(other->top());
@@ -1574,12 +1537,18 @@ size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
return unused;
}
+void PagedSpace::ResetFreeList() {
+ for (Page* page : *this) {
+ free_list_.EvictFreeListItems(page);
+ }
+ DCHECK(free_list_.IsEmpty());
+}
+
void PagedSpace::ShrinkImmortalImmovablePages() {
DCHECK(!heap()->deserialization_complete());
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- EmptyAllocationInfo();
+ FreeLinearAllocationArea();
ResetFreeList();
-
for (Page* page : *this) {
DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
ShrinkPageToHighWaterMark(page);
@@ -1623,7 +1592,7 @@ void PagedSpace::ResetFreeListStatistics() {
}
}
-void PagedSpace::SetAllocationInfo(Address top, Address limit) {
+void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
SetTopAndLimit(top, limit);
if (top != nullptr && top != limit &&
heap()->incremental_marking()->black_allocation()) {
@@ -1645,35 +1614,38 @@ void PagedSpace::DecreaseLimit(Address new_limit) {
}
}
-Address PagedSpace::ComputeLimit(Address start, Address end,
- size_t size_in_bytes) {
- DCHECK_GE(end - start, size_in_bytes);
+Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
+ size_t min_size) {
+ DCHECK_GE(end - start, min_size);
if (heap()->inline_allocation_disabled()) {
- // Keep the linear allocation area to fit exactly the requested size.
- return start + size_in_bytes;
- } else if (!allocation_observers_paused_ && !allocation_observers_.empty() &&
- identity() == OLD_SPACE && !is_local()) {
- // Generated code may allocate inline from the linear allocation area for
- // Old Space. To make sure we can observe these allocations, we use a lower
- // limit.
- size_t step = RoundSizeDownToObjectAlignment(
- static_cast<int>(GetNextInlineAllocationStepSize()));
- return Max(start + size_in_bytes, Min(start + step, end));
+ // Fit the requested area exactly.
+ return start + min_size;
+ } else if (SupportsInlineAllocation() && AllocationObserversActive()) {
+ // Generated code may allocate inline from the linear allocation area.
+ // To make sure we can observe these allocations, we use a lower limit.
+ size_t step = GetNextInlineAllocationStepSize();
+
+ // TODO(ofrobots): there is a subtle difference between old space and new
+ // space here. Any way to avoid it? `step - 1` makes more sense as we would
+ // like to sample the object that straddles the `start + step` boundary.
+ // Rounding down further would introduce a small statistical error in
+ // sampling. However, presently PagedSpace requires limit to be aligned.
+ size_t rounded_step;
+ if (identity() == NEW_SPACE) {
+ DCHECK_GE(step, 1);
+ rounded_step = step - 1;
+ } else {
+ rounded_step = RoundSizeDownToObjectAlignment(static_cast<int>(step));
+ }
+ return Min(start + min_size + rounded_step, end);
} else {
// The entire node can be used as the linear allocation area.
return end;
}
}
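// A stand-alone sketch (plain integers, no V8 types) of the unified limit
// computation above: exact fit when inline allocation is disabled, a lowered
// limit when observers are active, otherwise the whole area. rounded_step is
// an input here; above it is step - 1 for new space and step rounded down to
// object alignment for paged spaces.
#include <algorithm>
#include <cstddef>
#include <cstdint>

uintptr_t ComputeLimitSketch(uintptr_t start, uintptr_t end, size_t min_size,
                             bool inline_alloc_disabled,
                             bool observers_active, size_t rounded_step) {
  if (inline_alloc_disabled) return start + min_size;  // exact fit
  if (observers_active) {
    return std::min(start + min_size + rounded_step, end);  // lowered limit
  }
  return end;  // use the entire area
}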
-void PagedSpace::StartNextInlineAllocationStep() {
- if (!allocation_observers_paused_ && SupportsInlineAllocation()) {
- top_on_previous_step_ = allocation_observers_.empty() ? 0 : top();
- DecreaseLimit(ComputeLimit(top(), limit(), 0));
- }
-}
-
-void PagedSpace::MarkAllocationInfoBlack() {
+void PagedSpace::MarkLinearAllocationAreaBlack() {
DCHECK(heap()->incremental_marking()->black_allocation());
Address current_top = top();
Address current_limit = limit();
@@ -1683,7 +1655,7 @@ void PagedSpace::MarkAllocationInfoBlack() {
}
}
-void PagedSpace::UnmarkAllocationInfo() {
+void PagedSpace::UnmarkLinearAllocationArea() {
Address current_top = top();
Address current_limit = limit();
if (current_top != nullptr && current_top != current_limit) {
@@ -1692,8 +1664,7 @@ void PagedSpace::UnmarkAllocationInfo() {
}
}
-// Empty space allocation info, returning unused area to free list.
-void PagedSpace::EmptyAllocationInfo() {
+void PagedSpace::FreeLinearAllocationArea() {
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap.
Address current_top = top();
@@ -1718,12 +1689,7 @@ void PagedSpace::EmptyAllocationInfo() {
}
}
- if (top_on_previous_step_) {
- DCHECK(current_top >= top_on_previous_step_);
- AllocationStep(static_cast<int>(current_top - top_on_previous_step_),
- nullptr, 0);
- top_on_previous_step_ = 0;
- }
+ InlineAllocationStep(current_top, nullptr, nullptr, 0);
SetTopAndLimit(nullptr, nullptr);
DCHECK_GE(current_limit, current_top);
Free(current_top, current_limit - current_top);
@@ -1771,6 +1737,62 @@ std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
}
+bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
+ DCHECK(IsAligned(size_in_bytes, kPointerSize));
+ DCHECK_LE(top(), limit());
+#ifdef DEBUG
+ if (top() != limit()) {
+ DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
+ }
+#endif
+ // Don't free list allocate if there is linear space available.
+ DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);
+
+ // Mark the old linear allocation area with a free space map so it can be
+ // skipped when scanning the heap. This also puts it back in the free list
+ // if it is big enough.
+ FreeLinearAllocationArea();
+
+ if (!is_local()) {
+ heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
+ Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
+ }
+
+ size_t new_node_size = 0;
+ FreeSpace* new_node = free_list_.Allocate(size_in_bytes, &new_node_size);
+ if (new_node == nullptr) return false;
+
+ DCHECK_GE(new_node_size, size_in_bytes);
+
+#ifdef DEBUG
+ for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
+ reinterpret_cast<Object**>(new_node->address())[i] =
+ Smi::FromInt(kCodeZapValue);
+ }
+#endif
+
+ // The old-space-step might have finished sweeping and restarted marking.
+ // Verify that it did not turn the page of the new node into an evacuation
+ // candidate.
+ DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
+
+ // Memory in the linear allocation area is counted as allocated. We may free
+ // a little of this again immediately - see below.
+ IncreaseAllocatedBytes(new_node_size, Page::FromAddress(new_node->address()));
+
+ Address start = new_node->address();
+ Address end = new_node->address() + new_node_size;
+ Address limit = ComputeLimit(start, end, size_in_bytes);
+ DCHECK_LE(limit, end);
+ DCHECK_LE(size_in_bytes, limit - start);
+ if (limit != end) {
+ Free(limit, end - limit);
+ }
+ SetLinearAllocationArea(start, limit);
+
+ return true;
+}
+
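// A hedged sketch (toy types, FreeListStub is not V8 API) of the refill
// sequence above: take a node from the free list, keep [start, limit) as the
// new linear allocation area, and give the unused tail straight back to the
// free list.
#include <algorithm>
#include <cstddef>
#include <cstdint>

struct Node { uintptr_t start; size_t size; };

struct FreeListStub {
  bool TryAllocate(size_t min_size, Node* out);  // assumed free-list search
  void Free(uintptr_t start, size_t size);       // assumed list re-insertion
};

struct LinearArea { uintptr_t top = 0, limit = 0; };

bool RefillFromFreeList(FreeListStub* free_list, size_t size_in_bytes,
                        size_t step, LinearArea* area) {
  Node node;
  if (!free_list->TryAllocate(size_in_bytes, &node)) return false;
  uintptr_t start = node.start;
  uintptr_t end = node.start + node.size;
  // Analogue of ComputeLimit(): keep what is needed plus at most one step.
  uintptr_t limit = std::min(start + size_in_bytes + step, end);
  if (limit != end) free_list->Free(limit, end - limit);  // return the tail
  area->top = start;
  area->limit = limit;
  return true;
}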
#ifdef DEBUG
void PagedSpace::Print() {}
#endif
@@ -1904,31 +1926,13 @@ bool NewSpace::SetUp(size_t initial_semispace_capacity,
return false;
}
DCHECK(!from_space_.is_committed()); // No need to use memory yet.
- ResetAllocationInfo();
-
- // Allocate and set up the histogram arrays if necessary.
- allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
- promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
-#define SET_NAME(name) \
- allocated_histogram_[name].set_name(#name); \
- promoted_histogram_[name].set_name(#name);
- INSTANCE_TYPE_LIST(SET_NAME)
-#undef SET_NAME
+ ResetLinearAllocationArea();
return true;
}
void NewSpace::TearDown() {
- if (allocated_histogram_) {
- DeleteArray(allocated_histogram_);
- allocated_histogram_ = nullptr;
- }
- if (promoted_histogram_) {
- DeleteArray(promoted_histogram_);
- promoted_histogram_ = nullptr;
- }
-
allocation_info_.Reset(nullptr, nullptr);
to_space_.TearDown();
@@ -1952,7 +1956,7 @@ void NewSpace::Grow() {
if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
// We are in an inconsistent state because we could not
// commit/uncommit memory from new space.
- CHECK(false);
+ FATAL("inconsistent state");
}
}
}
@@ -1973,7 +1977,7 @@ void NewSpace::Shrink() {
if (!to_space_.GrowTo(from_space_.current_capacity())) {
// We are in an inconsistent state because we could not
// commit/uncommit memory from new space.
- CHECK(false);
+ FATAL("inconsistent state");
}
}
}
@@ -2028,22 +2032,21 @@ bool SemiSpace::EnsureCurrentCapacity() {
return true;
}
-AllocationInfo LocalAllocationBuffer::Close() {
+LinearAllocationArea LocalAllocationBuffer::Close() {
if (IsValid()) {
heap_->CreateFillerObjectAt(
allocation_info_.top(),
static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
ClearRecordedSlots::kNo);
- const AllocationInfo old_info = allocation_info_;
- allocation_info_ = AllocationInfo(nullptr, nullptr);
+ const LinearAllocationArea old_info = allocation_info_;
+ allocation_info_ = LinearAllocationArea(nullptr, nullptr);
return old_info;
}
- return AllocationInfo(nullptr, nullptr);
+ return LinearAllocationArea(nullptr, nullptr);
}
-
-LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
- AllocationInfo allocation_info)
+LocalAllocationBuffer::LocalAllocationBuffer(
+ Heap* heap, LinearAllocationArea allocation_info)
: heap_(heap), allocation_info_(allocation_info) {
if (IsValid()) {
heap_->CreateFillerObjectAt(
@@ -2074,21 +2077,25 @@ LocalAllocationBuffer& LocalAllocationBuffer::operator=(
return *this;
}
+void NewSpace::UpdateLinearAllocationArea() {
+ Address old_top = top();
+ Address new_top = to_space_.page_low();
-void NewSpace::UpdateAllocationInfo() {
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
+ allocation_info_.Reset(new_top, to_space_.page_high());
original_top_.SetValue(top());
original_limit_.SetValue(limit());
UpdateInlineAllocationLimit(0);
+ // TODO(ofrobots): It would be more correct to do a step before setting the
+ // limit on the new allocation area. However, fixing this causes a regression
+ // due to the idle scavenger getting pinged too frequently. crbug.com/795323.
+ InlineAllocationStep(old_top, new_top, nullptr, 0);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
-
-void NewSpace::ResetAllocationInfo() {
- Address old_top = allocation_info_.top();
+void NewSpace::ResetLinearAllocationArea() {
to_space_.Reset();
- UpdateAllocationInfo();
+ UpdateLinearAllocationArea();
// Clear all mark-bits in the to-space.
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
@@ -2097,29 +2104,19 @@ void NewSpace::ResetAllocationInfo() {
// Concurrent marking may have local live bytes for this page.
heap()->concurrent_marking()->ClearLiveness(p);
}
- InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
}
-
-void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
- if (heap()->inline_allocation_disabled()) {
- // Lowest limit when linear allocation was disabled.
- Address high = to_space_.page_high();
- Address new_top = allocation_info_.top() + size_in_bytes;
- allocation_info_.set_limit(Min(new_top, high));
- } else if (allocation_observers_paused_ || top_on_previous_step_ == 0) {
- // Normal limit is the end of the current page.
- allocation_info_.set_limit(to_space_.page_high());
- } else {
- // Lower limit during incremental marking.
- Address high = to_space_.page_high();
- Address new_top = allocation_info_.top() + size_in_bytes;
- Address new_limit = new_top + GetNextInlineAllocationStepSize() - 1;
- allocation_info_.set_limit(Min(new_limit, high));
- }
+void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
+ Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
+ allocation_info_.set_limit(new_limit);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
+void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
+ Address new_limit = ComputeLimit(top(), limit(), min_size);
+ DCHECK_LE(new_limit, limit());
+ DecreaseLimit(new_limit);
+}
bool NewSpace::AddFreshPage() {
Address top = allocation_info_.top();
@@ -2133,7 +2130,7 @@ bool NewSpace::AddFreshPage() {
Address limit = Page::FromAllocationAreaAddress(top)->area_end();
int remaining_in_page = static_cast<int>(limit - top);
heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
- UpdateAllocationInfo();
+ UpdateLinearAllocationArea();
return true;
}
@@ -2158,8 +2155,6 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
return false;
}
- InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
-
old_top = allocation_info_.top();
high = to_space_.page_high();
filler_size = Heap::GetFillToAlign(old_top, alignment);
@@ -2180,54 +2175,59 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
return true;
}
-
-void NewSpace::StartNextInlineAllocationStep() {
- if (!allocation_observers_paused_) {
- top_on_previous_step_ =
- !allocation_observers_.empty() ? allocation_info_.top() : 0;
+void SpaceWithLinearArea::StartNextInlineAllocationStep() {
+ if (AllocationObserversActive()) {
+ top_on_previous_step_ = top();
UpdateInlineAllocationLimit(0);
+ } else {
+ DCHECK_NULL(top_on_previous_step_);
}
}
-void NewSpace::PauseAllocationObservers() {
- // Do a step to account for memory allocated so far.
+void SpaceWithLinearArea::AddAllocationObserver(AllocationObserver* observer) {
InlineAllocationStep(top(), top(), nullptr, 0);
- Space::PauseAllocationObservers();
- top_on_previous_step_ = 0;
- UpdateInlineAllocationLimit(0);
+ Space::AddAllocationObserver(observer);
+ DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
}
-void PagedSpace::PauseAllocationObservers() {
- // Do a step to account for memory allocated so far.
- if (top_on_previous_step_) {
- int bytes_allocated = static_cast<int>(top() - top_on_previous_step_);
- AllocationStep(bytes_allocated, nullptr, 0);
- }
- Space::PauseAllocationObservers();
- top_on_previous_step_ = 0;
+void SpaceWithLinearArea::RemoveAllocationObserver(
+ AllocationObserver* observer) {
+ Address top_for_next_step =
+ allocation_observers_.size() == 1 ? nullptr : top();
+ InlineAllocationStep(top(), top_for_next_step, nullptr, 0);
+ Space::RemoveAllocationObserver(observer);
+ DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
}
-void NewSpace::ResumeAllocationObservers() {
+void SpaceWithLinearArea::PauseAllocationObservers() {
+ // Do a step to account for memory allocated so far.
+ InlineAllocationStep(top(), nullptr, nullptr, 0);
+ Space::PauseAllocationObservers();
DCHECK_NULL(top_on_previous_step_);
- Space::ResumeAllocationObservers();
- StartNextInlineAllocationStep();
+ UpdateInlineAllocationLimit(0);
}
-// TODO(ofrobots): refactor into SpaceWithLinearArea
-void PagedSpace::ResumeAllocationObservers() {
+void SpaceWithLinearArea::ResumeAllocationObservers() {
DCHECK_NULL(top_on_previous_step_);
Space::ResumeAllocationObservers();
StartNextInlineAllocationStep();
}
-void NewSpace::InlineAllocationStep(Address top, Address new_top,
- Address soon_object, size_t size) {
+void SpaceWithLinearArea::InlineAllocationStep(Address top,
+ Address top_for_next_step,
+ Address soon_object,
+ size_t size) {
if (top_on_previous_step_) {
- int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
- for (AllocationObserver* observer : allocation_observers_) {
- observer->AllocationStep(bytes_allocated, soon_object, size);
+ if (top < top_on_previous_step_) {
+ // Generated code decreased the top pointer to do folded allocations.
+ DCHECK_NOT_NULL(top);
+ DCHECK_EQ(Page::FromAllocationAreaAddress(top),
+ Page::FromAllocationAreaAddress(top_on_previous_step_));
+ top_on_previous_step_ = top;
}
- top_on_previous_step_ = new_top;
+ int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
+ AllocationStep(bytes_allocated, soon_object, static_cast<int>(size));
+ top_on_previous_step_ = top_for_next_step;
}
}
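// A minimal sketch of the step accounting above with plain integers: the
// delta since the last step is reported, after clamping for a top() that
// generated code moved backwards via folded allocations; the next step then
// starts at top_for_next_step (0 disables stepping until re-armed).
#include <cstdint>
#include <cstdio>

void InlineStepSketch(uintptr_t top, uintptr_t top_for_next_step,
                      uintptr_t* top_on_previous_step) {
  if (*top_on_previous_step == 0) return;  // stepping not armed
  if (top < *top_on_previous_step) {
    *top_on_previous_step = top;  // folded allocation moved top backwards
  }
  uintptr_t bytes_allocated = top - *top_on_previous_step;
  std::printf("observer step: %llu bytes\n",
              static_cast<unsigned long long>(bytes_allocated));
  *top_on_previous_step = top_for_next_step;
}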
@@ -2372,7 +2372,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
DCHECK_LE(new_capacity, maximum_capacity_);
DCHECK_GT(new_capacity, current_capacity_);
const size_t delta = new_capacity - current_capacity_;
- DCHECK(IsAligned(delta, base::OS::AllocatePageSize()));
+ DCHECK(IsAligned(delta, AllocatePageSize()));
const int delta_pages = static_cast<int>(delta / Page::kPageSize);
Page* last_page = anchor()->prev_page();
DCHECK_NE(last_page, anchor());
@@ -2416,7 +2416,7 @@ bool SemiSpace::ShrinkTo(size_t new_capacity) {
DCHECK_LT(new_capacity, current_capacity_);
if (is_committed()) {
const size_t delta = current_capacity_ - new_capacity;
- DCHECK(IsAligned(delta, base::OS::AllocatePageSize()));
+ DCHECK(IsAligned(delta, AllocatePageSize()));
int delta_pages = static_cast<int>(delta / Page::kPageSize);
Page* new_last_page;
Page* last_page;
@@ -2584,159 +2584,6 @@ void SemiSpaceIterator::Initialize(Address start, Address end) {
limit_ = end;
}
-#ifdef DEBUG
-// heap_histograms is shared, always clear it before using it.
-static void ClearHistograms(Isolate* isolate) {
-// We reset the name each time, though it hasn't changed.
-#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
- INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
-#undef DEF_TYPE_NAME
-
-#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
- INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
-#undef CLEAR_HISTOGRAM
-
- isolate->js_spill_information()->Clear();
-}
-
-static int CollectHistogramInfo(HeapObject* obj) {
- Isolate* isolate = obj->GetIsolate();
- InstanceType type = obj->map()->instance_type();
- DCHECK(0 <= type && type <= LAST_TYPE);
- DCHECK_NOT_NULL(isolate->heap_histograms()[type].name());
- isolate->heap_histograms()[type].increment_number(1);
- isolate->heap_histograms()[type].increment_bytes(obj->Size());
-
- if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
- JSObject::cast(obj)
- ->IncrementSpillStatistics(isolate->js_spill_information());
- }
-
- return obj->Size();
-}
-
-
-static void ReportHistogram(Isolate* isolate, bool print_spill) {
- PrintF("\n Object Histogram:\n");
- for (int i = 0; i <= LAST_TYPE; i++) {
- if (isolate->heap_histograms()[i].number() > 0) {
- PrintF(" %-34s%10d (%10d bytes)\n",
- isolate->heap_histograms()[i].name(),
- isolate->heap_histograms()[i].number(),
- isolate->heap_histograms()[i].bytes());
- }
- }
- PrintF("\n");
-
- // Summarize string types.
- int string_number = 0;
- int string_bytes = 0;
-#define INCREMENT(type, size, name, camel_name) \
- string_number += isolate->heap_histograms()[type].number(); \
- string_bytes += isolate->heap_histograms()[type].bytes();
- STRING_TYPE_LIST(INCREMENT)
-#undef INCREMENT
- if (string_number > 0) {
- PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
- string_bytes);
- }
-
- if (FLAG_collect_heap_spill_statistics && print_spill) {
- isolate->js_spill_information()->Print();
- }
-}
-#endif // DEBUG
-
-
-// Support for statistics gathering for --heap-stats and --log-gc.
-void NewSpace::ClearHistograms() {
- for (int i = 0; i <= LAST_TYPE; i++) {
- allocated_histogram_[i].clear();
- promoted_histogram_[i].clear();
- }
-}
-
-
-// Because the copying collector does not touch garbage objects, we iterate
-// the new space before a collection to get a histogram of allocated objects.
-// This only happens when --log-gc flag is set.
-void NewSpace::CollectStatistics() {
- ClearHistograms();
- SemiSpaceIterator it(this);
- for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next())
- RecordAllocation(obj);
-}
-
-
-static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
- const char* description) {
- LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
- // Lump all the string types together.
- int string_number = 0;
- int string_bytes = 0;
-#define INCREMENT(type, size, name, camel_name) \
- string_number += info[type].number(); \
- string_bytes += info[type].bytes();
- STRING_TYPE_LIST(INCREMENT)
-#undef INCREMENT
- if (string_number > 0) {
- LOG(isolate,
- HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
- }
-
- // Then do the other types.
- for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
- if (info[i].number() > 0) {
- LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
- info[i].bytes()));
- }
- }
- LOG(isolate, HeapSampleEndEvent("NewSpace", description));
-}
-
-
-void NewSpace::ReportStatistics() {
-#ifdef DEBUG
- if (FLAG_heap_stats) {
- float pct = static_cast<float>(Available()) / TotalCapacity();
- PrintF(" capacity: %" PRIuS ", available: %" PRIuS ", %%%d\n",
- TotalCapacity(), Available(), static_cast<int>(pct * 100));
- PrintF("\n Object Histogram:\n");
- for (int i = 0; i <= LAST_TYPE; i++) {
- if (allocated_histogram_[i].number() > 0) {
- PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
- allocated_histogram_[i].number(),
- allocated_histogram_[i].bytes());
- }
- }
- PrintF("\n");
- }
-#endif // DEBUG
-
- if (FLAG_log_gc) {
- Isolate* isolate = heap()->isolate();
- DoReportStatistics(isolate, allocated_histogram_, "allocated");
- DoReportStatistics(isolate, promoted_histogram_, "promoted");
- }
-}
-
-
-void NewSpace::RecordAllocation(HeapObject* obj) {
- InstanceType type = obj->map()->instance_type();
- DCHECK(0 <= type && type <= LAST_TYPE);
- allocated_histogram_[type].increment_number(1);
- allocated_histogram_[type].increment_bytes(obj->Size());
-}
-
-
-void NewSpace::RecordPromotion(HeapObject* obj) {
- InstanceType type = obj->map()->instance_type();
- DCHECK(0 <= type && type <= LAST_TYPE);
- promoted_histogram_[type].increment_number(1);
- promoted_histogram_[type].increment_bytes(obj->Size());
-}
-
-
size_t NewSpace::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits()) return CommittedMemory();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@@ -2839,11 +2686,6 @@ void FreeListCategory::Relink() {
owner()->AddCategory(this);
}
-void FreeListCategory::Invalidate() {
- Reset();
- type_ = kInvalidCategory;
-}
-
FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
categories_[i] = nullptr;
@@ -2932,9 +2774,9 @@ FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
return node;
}
-FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
+FreeSpace* FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
FreeSpace* node = nullptr;
-
// First try the allocation fast path: try to allocate the minimum element
// size of a free list category. This operation is constant time.
FreeListCategoryType type =
@@ -2964,75 +2806,14 @@ FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
return node;
}
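Aside: the renamed FreeList::Allocate above follows the usual segregated free-list pattern: map the request size to a size category, take a node from the smallest category that can cover it, and fall back to larger categories otherwise. The standalone sketch below only illustrates that lookup; the category boundaries, containers and names are made up and are not V8's.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <list>
#include <map>

// Toy segregated free list: categories are keyed by an upper size bound and
// Allocate() serves a request from the first category that can cover it,
// returning the (possibly larger) node size through |node_size|.
class ToyFreeList {
 public:
  void Free(size_t size) { categories_[CategoryFor(size)].push_back(size); }

  // Returns 0 if no node of at least |size| bytes is available.
  size_t Allocate(size_t size, size_t* node_size) {
    for (auto it = categories_.lower_bound(CategoryFor(size));
         it != categories_.end(); ++it) {
      for (auto node = it->second.begin(); node != it->second.end(); ++node) {
        if (*node >= size) {
          *node_size = *node;
          it->second.erase(node);
          return *node_size;
        }
      }
    }
    return 0;
  }

 private:
  // Made-up boundaries; V8 has its own tiny/small/medium/large/huge limits.
  static size_t CategoryFor(size_t size) {
    if (size <= 32) return 32;
    if (size <= 256) return 256;
    if (size <= 2048) return 2048;
    return SIZE_MAX;
  }

  std::map<size_t, std::list<size_t>> categories_;
};

int main() {
  ToyFreeList free_list;
  free_list.Free(64);
  free_list.Free(4096);
  size_t node_size = 0;
  if (free_list.Allocate(300, &node_size) != 0) {
    std::cout << "request for 300 bytes served by a " << node_size
              << "-byte node\n";
  }
  return 0;
}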
-bool FreeList::Allocate(size_t size_in_bytes) {
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
- DCHECK(IsAligned(size_in_bytes, kPointerSize));
- DCHECK_LE(owner_->top(), owner_->limit());
-#ifdef DEBUG
- if (owner_->top() != owner_->limit()) {
- DCHECK_EQ(Page::FromAddress(owner_->top()),
- Page::FromAddress(owner_->limit() - 1));
- }
-#endif
- // Don't free list allocate if there is linear space available.
- DCHECK_LT(static_cast<size_t>(owner_->limit() - owner_->top()),
- size_in_bytes);
-
- // Mark the old linear allocation area with a free space map so it can be
- // skipped when scanning the heap. This also puts it back in the free list
- // if it is big enough.
- owner_->EmptyAllocationInfo();
-
- if (!owner_->is_local()) {
- owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
- Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
- }
-
- size_t new_node_size = 0;
- FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
- if (new_node == nullptr) return false;
-
- DCHECK_GE(new_node_size, size_in_bytes);
-
-#ifdef DEBUG
- for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
- reinterpret_cast<Object**>(new_node->address())[i] =
- Smi::FromInt(kCodeZapValue);
- }
-#endif
-
- // The old-space-step might have finished sweeping and restarted marking.
- // Verify that it did not turn the page of the new node into an evacuation
- // candidate.
- DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
-
- // Memory in the linear allocation area is counted as allocated. We may free
- // a little of this again immediately - see below.
- owner_->IncreaseAllocatedBytes(new_node_size,
- Page::FromAddress(new_node->address()));
-
- Address start = new_node->address();
- Address end = new_node->address() + new_node_size;
- Address limit = owner_->ComputeLimit(start, end, size_in_bytes);
- DCHECK_LE(limit, end);
- DCHECK_LE(size_in_bytes, limit - start);
- if (limit != end) {
- owner_->Free(limit, end - limit);
- }
- owner_->SetAllocationInfo(start, limit);
-
- return true;
-}
-
size_t FreeList::EvictFreeListItems(Page* page) {
size_t sum = 0;
- page->ForAllFreeListCategories(
- [this, &sum](FreeListCategory* category) {
- DCHECK_EQ(this, category->owner());
- sum += category->available();
- RemoveCategory(category);
- category->Invalidate();
- });
+ page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
+ DCHECK_EQ(this, category->owner());
+ sum += category->available();
+ RemoveCategory(category);
+ category->Reset();
+ });
return sum;
}
@@ -3054,6 +2835,7 @@ void FreeList::RepairLists(Heap* heap) {
bool FreeList::AddCategory(FreeListCategory* category) {
FreeListCategoryType type = category->type_;
+ DCHECK_LT(type, kNumberOfCategories);
FreeListCategory* top = categories_[type];
if (category->is_empty()) return false;
@@ -3070,6 +2852,7 @@ bool FreeList::AddCategory(FreeListCategory* category) {
void FreeList::RemoveCategory(FreeListCategory* category) {
FreeListCategoryType type = category->type_;
+ DCHECK_LT(type, kNumberOfCategories);
FreeListCategory* top = categories_[type];
// Common double-linked list removal.
@@ -3152,7 +2935,7 @@ size_t FreeList::SumFreeLists() {
void PagedSpace::PrepareForMarkCompact() {
// We don't have a linear allocation area while sweeping. It will be restored
// on the first allocation after the sweep.
- EmptyAllocationInfo();
+ FreeLinearAllocationArea();
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_.Reset();
@@ -3181,6 +2964,12 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
}
Address start = page->HighWaterMark();
Address end = page->area_end();
+ if (start < end - size) {
+ // A region at the high watermark is already in the free list.
+ HeapObject* filler = HeapObject::FromAddress(start);
+ CHECK(filler->IsFiller());
+ start += filler->Size();
+ }
CHECK_EQ(size, static_cast<int>(end - start));
heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
}
@@ -3194,7 +2983,7 @@ bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
// After waiting for the sweeper threads, there may be new free-list
// entries.
- return free_list_.Allocate(size_in_bytes);
+ return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
}
return false;
}
@@ -3204,27 +2993,29 @@ bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
if (FLAG_concurrent_sweeping && collector->sweeping_in_progress()) {
collector->sweeper()->ParallelSweepSpace(identity(), 0);
RefillFreeList();
- return free_list_.Allocate(size_in_bytes);
+ return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
}
return false;
}
-bool PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
VMState<GC> state(heap()->isolate());
RuntimeCallTimerScope runtime_timer(
- heap()->isolate(), &RuntimeCallStats::GC_Custom_SlowAllocateRaw);
- return RawSlowAllocateRaw(size_in_bytes);
+ heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
+ return RawSlowRefillLinearAllocationArea(size_in_bytes);
}
-bool CompactionSpace::SlowAllocateRaw(int size_in_bytes) {
- return RawSlowAllocateRaw(size_in_bytes);
+bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
+ return RawSlowRefillLinearAllocationArea(size_in_bytes);
}
-bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
+bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
// Allocation in this space has failed.
DCHECK_GE(size_in_bytes, 0);
const int kMaxPagesToSweep = 1;
+ if (RefillLinearAllocationAreaFromFreeList(size_in_bytes)) return true;
+
MarkCompactCollector* collector = heap()->mark_compact_collector();
// Sweeping is still in progress.
if (collector->sweeping_in_progress()) {
@@ -3238,14 +3029,18 @@ bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
RefillFreeList();
// Retry the free list allocation.
- if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
+ if (RefillLinearAllocationAreaFromFreeList(
+ static_cast<size_t>(size_in_bytes)))
+ return true;
// If sweeping is still in progress try to sweep pages.
int max_freed = collector->sweeper()->ParallelSweepSpace(
identity(), size_in_bytes, kMaxPagesToSweep);
RefillFreeList();
if (max_freed >= size_in_bytes) {
- if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
+ if (RefillLinearAllocationAreaFromFreeList(
+ static_cast<size_t>(size_in_bytes)))
+ return true;
}
} else if (is_local()) {
// Sweeping not in progress and we are on a {CompactionSpace}. This can
@@ -3254,14 +3049,17 @@ bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
Page* page = main_space->RemovePageSafe(size_in_bytes);
if (page != nullptr) {
AddPage(page);
- if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
+ if (RefillLinearAllocationAreaFromFreeList(
+ static_cast<size_t>(size_in_bytes)))
+ return true;
}
}
if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
DCHECK((CountTotalPages() > 1) ||
(static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
- return free_list_.Allocate(static_cast<size_t>(size_in_bytes));
+ return RefillLinearAllocationAreaFromFreeList(
+ static_cast<size_t>(size_in_bytes));
}
// If sweeper threads are active, wait for them at that point and steal
@@ -3270,23 +3068,6 @@ bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
return SweepAndRetryAllocation(size_in_bytes);
}
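Aside: the renames in this function (SlowAllocateRaw to SlowRefillLinearAllocationArea, free_list_.Allocate to RefillLinearAllocationAreaFromFreeList) all describe one mechanism: allocation bumps a top pointer toward a limit, and the slow path's job is to install a fresh [top, limit) window before retrying. The sketch below shows only that shape; the block source (plain new[]) and every name stand in for the free list and are not V8 code.

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

// Toy bump-pointer space: the fast path carves allocations out of the current
// linear area [top_, limit_); when the request does not fit, the slow path
// installs a fresh area (here a heap block standing in for a free-list node)
// and the allocation is retried.
class ToySpace {
 public:
  ~ToySpace() {
    for (char* block : blocks_) delete[] block;
  }

  void* AllocateRaw(size_t size) {
    if (static_cast<size_t>(limit_ - top_) < size) {
      if (!RefillLinearAllocationArea(size)) return nullptr;  // caller would GC
    }
    void* result = top_;
    top_ += size;
    return result;
  }

 private:
  bool RefillLinearAllocationArea(size_t min_size) {
    const size_t block_size = std::max<size_t>(min_size, 4096);
    char* block = new char[block_size];
    blocks_.push_back(block);
    top_ = block;
    limit_ = block + block_size;
    return true;
  }

  char* top_ = nullptr;
  char* limit_ = nullptr;
  std::vector<char*> blocks_;
};

int main() {
  ToySpace space;
  void* a = space.AllocateRaw(32);  // first call refills the linear area
  void* b = space.AllocateRaw(64);  // served from the same area, no refill
  std::cout << a << " " << b << "\n";
  return 0;
}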
-#ifdef DEBUG
-void PagedSpace::ReportStatistics() {
- int pct = static_cast<int>(Available() * 100 / Capacity());
- PrintF(" capacity: %" PRIuS ", waste: %" PRIuS
- ", available: %" PRIuS ", %%%d\n",
- Capacity(), Waste(), Available(), pct);
-
- heap()->mark_compact_collector()->EnsureSweepingCompleted();
- ClearHistograms(heap()->isolate());
- HeapObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.Next(); obj != nullptr; obj = obj_it.Next())
- CollectHistogramInfo(obj);
- ReportHistogram(heap()->isolate(), true);
-}
-#endif
-
-
// -----------------------------------------------------------------------------
// MapSpace implementation
@@ -3436,7 +3217,6 @@ LargePage* LargeObjectSpace::FindPage(Address a) {
auto it = chunk_map_.find(reinterpret_cast<Address>(key));
if (it != chunk_map_.end()) {
LargePage* page = it->second;
- DCHECK(LargePage::IsValid(page));
if (page->Contains(a)) {
return page;
}
@@ -3572,13 +3352,14 @@ void LargeObjectSpace::Verify() {
// We have only code, sequential strings, external strings (sequential
// strings that have been morphed into external strings), thin strings
// (sequential strings that have been morphed into thin strings), fixed
- // arrays, fixed double arrays, byte arrays, feedback vectors and free space
- // (right after allocation) in the large object space.
+ // arrays, fixed double arrays, byte arrays, feedback vectors, bigints and
+ // free space (right after allocation) in the large object space.
CHECK(object->IsAbstractCode() || object->IsSeqString() ||
object->IsExternalString() || object->IsThinString() ||
object->IsFixedArray() || object->IsFixedDoubleArray() ||
object->IsPropertyArray() || object->IsByteArray() ||
- object->IsFeedbackVector() || object->IsFreeSpace());
+ object->IsFeedbackVector() || object->IsBigInt() ||
+ object->IsFreeSpace());
// The object itself should look OK.
object->ObjectVerify();
@@ -3625,25 +3406,6 @@ void LargeObjectSpace::Print() {
}
}
-
-void LargeObjectSpace::ReportStatistics() {
- PrintF(" size: %" PRIuS "\n", size_);
- int num_objects = 0;
- ClearHistograms(heap()->isolate());
- LargeObjectIterator it(this);
- for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
- num_objects++;
- CollectHistogramInfo(obj);
- }
-
- PrintF(
- " number of objects %d, "
- "size of objects %" PRIuS "\n",
- num_objects, objects_size_);
- if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
-}
-
-
void Page::Print() {
// Make a best-effort to print the objects in the page.
PrintF("Page@%p in %s\n", static_cast<void*>(this->address()),
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 3fb3c39496..08fef7d6e3 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -34,12 +34,12 @@ class HeapTester;
class TestCodeRangeScope;
} // namespace heap
-class AllocationInfo;
class AllocationObserver;
class CompactionSpace;
class CompactionSpaceCollection;
class FreeList;
class Isolate;
+class LinearAllocationArea;
class LocalArrayBufferTracker;
class MemoryAllocator;
class MemoryChunk;
@@ -170,8 +170,6 @@ class FreeListCategory {
next_ = nullptr;
}
- void Invalidate();
-
void Reset();
void ResetStats() { Reset(); }
@@ -425,8 +423,6 @@ class MemoryChunk {
!chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
}
- static bool IsValid(MemoryChunk* chunk) { return chunk != nullptr; }
-
Address address() const {
return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
}
@@ -610,25 +606,9 @@ class MemoryChunk {
void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
- Space* owner() const {
- uintptr_t owner_value = base::AsAtomicWord::Acquire_Load(
- reinterpret_cast<const uintptr_t*>(&owner_));
- return ((owner_value & kPageHeaderTagMask) == kPageHeaderTag)
- ? reinterpret_cast<Space*>(owner_value - kPageHeaderTag)
- : nullptr;
- }
-
- void set_owner(Space* space) {
- DCHECK_EQ(0, reinterpret_cast<uintptr_t>(space) & kPageHeaderTagMask);
- base::AsAtomicWord::Release_Store(
- reinterpret_cast<uintptr_t*>(&owner_),
- reinterpret_cast<uintptr_t>(space) + kPageHeaderTag);
- DCHECK_EQ(kPageHeaderTag, base::AsAtomicWord::Relaxed_Load(
- reinterpret_cast<const uintptr_t*>(&owner_)) &
- kPageHeaderTagMask);
- }
+ Space* owner() const { return owner_.Value(); }
- bool HasPageHeader() { return owner() != nullptr; }
+ void set_owner(Space* space) { owner_.SetValue(space); }
void InsertAfter(MemoryChunk* other);
void Unlink();
@@ -640,6 +620,8 @@ class MemoryChunk {
void SetReadAndExecutable();
void SetReadAndWritable();
+ inline void InitializeFreeListCategories();
+
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
@@ -661,10 +643,8 @@ class MemoryChunk {
// If the chunk needs to remember its memory reservation, it is stored here.
VirtualMemory reservation_;
- // The identity of the owning space. This is tagged as a failure pointer, but
- // no failure can be in an object, so this can be distinguished from any entry
- // in a fixed array.
- Address owner_;
+ // The space owning this memory chunk.
+ base::AtomicValue<Space*> owner_;
Heap* heap_;
@@ -792,8 +772,6 @@ class Page : public MemoryChunk {
static Page* ConvertNewToOld(Page* old_page);
- inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
-
// Create a Page object that is only used as anchor for the doubly-linked
// list of real pages.
explicit Page(Space* owner) { InitializeAsAnchor(owner); }
@@ -845,8 +823,6 @@ class Page : public MemoryChunk {
return &categories_[type];
}
- inline void InitializeFreeListCategories();
-
bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
size_t wasted_memory() { return wasted_memory_; }
@@ -933,9 +909,11 @@ class Space : public Malloced {
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
- void AddAllocationObserver(AllocationObserver* observer);
+ V8_EXPORT_PRIVATE virtual void AddAllocationObserver(
+ AllocationObserver* observer);
- void RemoveAllocationObserver(AllocationObserver* observer);
+ V8_EXPORT_PRIVATE virtual void RemoveAllocationObserver(
+ AllocationObserver* observer);
V8_EXPORT_PRIVATE virtual void PauseAllocationObservers();
@@ -995,11 +973,14 @@ class Space : public Malloced {
protected:
intptr_t GetNextInlineAllocationStepSize();
+ bool AllocationObserversActive() {
+ return !allocation_observers_paused_ && !allocation_observers_.empty();
+ }
std::vector<AllocationObserver*> allocation_observers_;
bool allocation_observers_paused_;
- private:
+ protected:
Heap* heap_;
AllocationSpace id_;
Executability executable_;
@@ -1223,19 +1204,11 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
void FreeQueuedChunks();
void WaitUntilCompleted();
void TearDown();
-
- bool has_delayed_chunks() { return delayed_regular_chunks_.size() > 0; }
-
- int NumberOfDelayedChunks() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- return static_cast<int>(delayed_regular_chunks_.size());
- }
-
int NumberOfChunks();
private:
static const int kReservedQueueingSlots = 64;
- static const int kMaxUnmapperTasks = 24;
+ static const int kMaxUnmapperTasks = 4;
enum ChunkQueueType {
kRegular, // Pages of kPageSize that do not live in a CodeRange and
@@ -1253,12 +1226,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
template <ChunkQueueType type>
void AddMemoryChunkSafe(MemoryChunk* chunk) {
base::LockGuard<base::Mutex> guard(&mutex_);
- if (type != kRegular || allocator_->CanFreeMemoryChunk(chunk)) {
- chunks_[type].push_back(chunk);
- } else {
- DCHECK_EQ(type, kRegular);
- delayed_regular_chunks_.push_back(chunk);
- }
+ chunks_[type].push_back(chunk);
}
template <ChunkQueueType type>
@@ -1270,7 +1238,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
return chunk;
}
- void ReconsiderDelayedChunks();
template <FreeMode mode>
void PerformFreeMemoryOnQueuedChunks();
@@ -1278,10 +1245,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
MemoryAllocator* const allocator_;
base::Mutex mutex_;
std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
- // Delayed chunks cannot be processed in the current unmapping cycle because
- // of dependencies such as an active sweeper.
- // See MemoryAllocator::CanFreeMemoryChunk.
- std::list<MemoryChunk*> delayed_regular_chunks_;
CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
base::Semaphore pending_unmapping_tasks_semaphore_;
intptr_t concurrent_unmapping_tasks_active_;
@@ -1342,8 +1305,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
template <MemoryAllocator::FreeMode mode = kFull>
void Free(MemoryChunk* chunk);
- bool CanFreeMemoryChunk(MemoryChunk* chunk);
-
// Returns allocated spaces in bytes.
size_t Size() { return size_.Value(); }
@@ -1415,11 +1376,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
CodeRange* code_range() { return code_range_; }
Unmapper* unmapper() { return &unmapper_; }
-#ifdef DEBUG
- // Reports statistic info of the space.
- void ReportStatistics();
-#endif
-
private:
// PreFree logically frees the object, i.e., it takes care of the size
// bookkeeping and calls the allocation callback.
@@ -1584,10 +1540,10 @@ class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
// An abstraction of allocation and relocation pointers in a page-structured
// space.
-class AllocationInfo {
+class LinearAllocationArea {
public:
- AllocationInfo() : top_(nullptr), limit_(nullptr) {}
- AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}
+ LinearAllocationArea() : top_(nullptr), limit_(nullptr) {}
+ LinearAllocationArea(Address top, Address limit) : top_(top), limit_(limit) {}
void Reset(Address top, Address limit) {
set_top(top);
@@ -1785,10 +1741,11 @@ class V8_EXPORT_PRIVATE FreeList {
// and the size should be a non-zero multiple of the word size.
size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
- // Finds a node of size at least size_in_bytes and sets up a linear allocation
- // area using this node. Returns false if there is no such node and the caller
- // has to retry allocation after collecting garbage.
- MUST_USE_RESULT bool Allocate(size_t size_in_bytes);
+ // Allocates a free space node from the free list of at least size_in_bytes
+ // bytes. Returns the actual node size in node_size which can be bigger than
+ // size_in_bytes. This method returns null if the allocation request cannot be
+ // handled by the free list.
+ MUST_USE_RESULT FreeSpace* Allocate(size_t size_in_bytes, size_t* node_size);
// Clear the free list.
void Reset();
@@ -1887,8 +1844,6 @@ class V8_EXPORT_PRIVATE FreeList {
static const size_t kMediumAllocationMax = kSmallListMax;
static const size_t kLargeAllocationMax = kMediumListMax;
- FreeSpace* FindNodeFor(size_t size_in_bytes, size_t* node_size);
-
// Walks all available categories for a given |type| and tries to retrieve
// a node. Returns nullptr if the category is empty.
FreeSpace* FindNodeIn(FreeListCategoryType type, size_t* node_size);
@@ -1975,16 +1930,73 @@ class LocalAllocationBuffer {
inline bool TryFreeLast(HeapObject* object, int object_size);
// Close a LAB, effectively invalidating it. Returns the unused area.
- AllocationInfo Close();
+ LinearAllocationArea Close();
private:
- LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);
+ LocalAllocationBuffer(Heap* heap, LinearAllocationArea allocation_info);
Heap* heap_;
- AllocationInfo allocation_info_;
+ LinearAllocationArea allocation_info_;
+};
+
+class SpaceWithLinearArea : public Space {
+ public:
+ SpaceWithLinearArea(Heap* heap, AllocationSpace id, Executability executable)
+ : Space(heap, id, executable), top_on_previous_step_(0) {
+ allocation_info_.Reset(nullptr, nullptr);
+ }
+
+ virtual bool SupportsInlineAllocation() = 0;
+
+ // Returns the allocation pointer in this space.
+ Address top() { return allocation_info_.top(); }
+ Address limit() { return allocation_info_.limit(); }
+
+ // The allocation top address.
+ Address* allocation_top_address() { return allocation_info_.top_address(); }
+
+ // The allocation limit address.
+ Address* allocation_limit_address() {
+ return allocation_info_.limit_address();
+ }
+
+ V8_EXPORT_PRIVATE void AddAllocationObserver(
+ AllocationObserver* observer) override;
+ V8_EXPORT_PRIVATE void RemoveAllocationObserver(
+ AllocationObserver* observer) override;
+ V8_EXPORT_PRIVATE void ResumeAllocationObservers() override;
+ V8_EXPORT_PRIVATE void PauseAllocationObservers() override;
+
+ // When allocation observers are active we may use a lower limit to allow the
+ // observers to 'interrupt' earlier than the natural limit. Given a linear
+ // area bounded by [start, end), this function computes the limit to use to
+ // allow proper observation based on existing observers. min_size specifies
+ // the minimum size that the limited area should have.
+ Address ComputeLimit(Address start, Address end, size_t min_size);
+ V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
+ size_t min_size) = 0;
+
+ protected:
+ // If we are doing inline allocation in steps, this method performs the 'step'
+ // operation. top is the memory address of the bump pointer at the last
+ // inline allocation (i.e. it determines the number of bytes actually
+ // allocated since the last step.) top_for_next_step is the address of the
+ // bump pointer where the next byte is going to be allocated from. top and
+ // top_for_next_step may be different when we cross a page boundary or reset
+ // the space.
+ // TODO(ofrobots): clarify the precise difference between this and
+ // Space::AllocationStep.
+ void InlineAllocationStep(Address top, Address top_for_next_step,
+ Address soon_object, size_t size);
+ V8_EXPORT_PRIVATE void StartNextInlineAllocationStep() override;
+
+ // TODO(ofrobots): make these private after refactoring is complete.
+ LinearAllocationArea allocation_info_;
+ Address top_on_previous_step_;
};
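Aside: the ComputeLimit contract documented above can be pictured with a few lines of arithmetic. The helper below is a made-up stand-in, not the real implementation; it only shows the clamping idea: keep at least min_size of usable area, but pull the limit in to the next observer step when observers are active.

#include <algorithm>
#include <cstdint>
#include <iostream>

using Address = uintptr_t;

// Hypothetical stand-in for SpaceWithLinearArea::ComputeLimit. |next_step| is
// the distance at which the nearest allocation observer wants to be notified.
Address ComputeLimitSketch(Address start, Address end, size_t min_size,
                           bool observers_active, size_t next_step) {
  if (!observers_active) return end;
  Address observer_limit = start + std::max(min_size, next_step);
  return std::min(end, observer_limit);
}

int main() {
  const Address start = 0x1000, end = 0x3000;
  // Observers active with a 512-byte step: the limit becomes start + 512
  // (0x1200) instead of the natural end (0x3000), so allocation traps into
  // the slow path early and the observer gets its step.
  std::cout << std::hex << ComputeLimitSketch(start, end, 64, true, 512) << " "
            << ComputeLimitSketch(start, end, 64, false, 512) << "\n";
  return 0;
}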
-class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
+class V8_EXPORT_PRIVATE PagedSpace
+ : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
public:
typedef PageIterator iterator;
@@ -2056,18 +2068,6 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// due to being too small to use for allocation.
virtual size_t Waste() { return free_list_.wasted_bytes(); }
- // Returns the allocation pointer in this space.
- Address top() { return allocation_info_.top(); }
- Address limit() { return allocation_info_.limit(); }
-
- // The allocation top address.
- Address* allocation_top_address() { return allocation_info_.top_address(); }
-
- // The allocation limit address.
- Address* allocation_limit_address() {
- return allocation_info_.limit_address();
- }
-
enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
// Allocate the requested number of bytes in the space if possible, return a
@@ -2106,16 +2106,13 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
inline bool TryFreeLast(HeapObject* object, int object_size);
- void ResetFreeList() { free_list_.Reset(); }
+ void ResetFreeList();
- void PauseAllocationObservers() override;
- void ResumeAllocationObservers() override;
+ // Empty space linear allocation area, returning unused area to free list.
+ void FreeLinearAllocationArea();
- // Empty space allocation info, returning unused area to free list.
- void EmptyAllocationInfo();
-
- void MarkAllocationInfoBlack();
- void UnmarkAllocationInfo();
+ void MarkLinearAllocationAreaBlack();
+ void UnmarkLinearAllocationArea();
void DecreaseAllocatedBytes(size_t bytes, Page* page) {
accounting_stats_.DecreaseAllocatedBytes(bytes, page);
@@ -2165,9 +2162,6 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// Print meta info and objects in this space.
void Print() override;
- // Reports statistics for the space
- void ReportStatistics();
-
// Report code object related statistics
static void ReportCodeStatistics(Isolate* isolate);
static void ResetCodeStatistics(Isolate* isolate);
@@ -2212,11 +2206,10 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
std::unique_ptr<ObjectIterator> GetObjectIterator() override;
- Address ComputeLimit(Address start, Address end, size_t size_in_bytes);
- void SetAllocationInfo(Address top, Address limit);
+ void SetLinearAllocationArea(Address top, Address limit);
private:
- // Set space allocation info.
+ // Set space linear allocation area.
void SetTopAndLimit(Address top, Address limit) {
DCHECK(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
@@ -2224,8 +2217,10 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
allocation_info_.Reset(top, limit);
}
void DecreaseLimit(Address new_limit);
- void StartNextInlineAllocationStep() override;
- bool SupportsInlineAllocation() { return identity() == OLD_SPACE; }
+ void UpdateInlineAllocationLimit(size_t min_size) override;
+ bool SupportsInlineAllocation() override {
+ return identity() == OLD_SPACE && !is_local();
+ }
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
@@ -2256,6 +2251,10 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// (object size + alignment filler size) to the size_in_bytes.
inline HeapObject* TryAllocateLinearlyAligned(int* size_in_bytes,
AllocationAlignment alignment);
+
+ MUST_USE_RESULT bool RefillLinearAllocationAreaFromFreeList(
+ size_t size_in_bytes);
+
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and retry free-list
// allocation. Returns false if there is not enough space and the caller
@@ -2265,11 +2264,12 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// Slow path of AllocateRaw. This function is space-dependent. Returns false
// if there is not enough space and the caller has to retry after
// collecting garbage.
- MUST_USE_RESULT virtual bool SlowAllocateRaw(int size_in_bytes);
+ MUST_USE_RESULT virtual bool SlowRefillLinearAllocationArea(
+ int size_in_bytes);
// Implementation of SlowAllocateRaw. Returns false if there is not enough
// space and the caller has to retry after collecting garbage.
- MUST_USE_RESULT bool RawSlowAllocateRaw(int size_in_bytes);
+ MUST_USE_RESULT bool RawSlowRefillLinearAllocationArea(int size_in_bytes);
size_t area_size_;
@@ -2282,14 +2282,9 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// The space's free list.
FreeList free_list_;
- // Normal allocation information.
- AllocationInfo allocation_info_;
-
// Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_;
- Address top_on_previous_step_;
-
friend class IncrementalMarking;
friend class MarkCompactCollector;
@@ -2500,18 +2495,15 @@ class SemiSpaceIterator : public ObjectIterator {
// The new space consists of a contiguous pair of semispaces. It simply
// forwards most functions to the appropriate semispace.
-class NewSpace : public Space {
+class NewSpace : public SpaceWithLinearArea {
public:
typedef PageIterator iterator;
explicit NewSpace(Heap* heap)
- : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
- top_on_previous_step_(0),
+ : SpaceWithLinearArea(heap, NEW_SPACE, NOT_EXECUTABLE),
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace),
- reservation_(),
- allocated_histogram_(nullptr),
- promoted_histogram_(nullptr) {}
+ reservation_() {}
inline bool Contains(HeapObject* o);
inline bool ContainsSlow(Address a);
@@ -2631,18 +2623,6 @@ class NewSpace : public Space {
return to_space_.minimum_capacity();
}
- // Return the address of the allocation pointer in the active semispace.
- Address top() {
- DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
- return allocation_info_.top();
- }
-
- // Return the address of the allocation pointer limit in the active semispace.
- Address limit() {
- DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
- return allocation_info_.limit();
- }
-
void ResetOriginalTop() {
DCHECK_GE(top(), original_top());
DCHECK_LE(top(), original_limit());
@@ -2660,14 +2640,6 @@ class NewSpace : public Space {
// Set the age mark in the active semispace.
void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
- // The allocation top and limit address.
- Address* allocation_top_address() { return allocation_info_.top_address(); }
-
- // The allocation limit address.
- Address* allocation_limit_address() {
- return allocation_info_.limit_address();
- }
-
MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment));
@@ -2681,19 +2653,14 @@ class NewSpace : public Space {
int size_in_bytes, AllocationAlignment alignment);
// Reset the allocation pointer to the beginning of the active semispace.
- void ResetAllocationInfo();
+ void ResetLinearAllocationArea();
// When inline allocation stepping is active, either because of incremental
// marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
// inline allocation every once in a while. This is done by setting
// allocation_info_.limit to be lower than the actual limit and increasing
// it in steps to guarantee that the observers are notified periodically.
- void UpdateInlineAllocationLimit(int size_in_bytes);
-
- void DisableInlineAllocationSteps() {
- top_on_previous_step_ = 0;
- UpdateInlineAllocationLimit(0);
- }
+ void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
// Get the extent of the inactive semispace (for use as a marking stack,
// or to zap it). Notice: space-addresses are not necessarily on the
@@ -2729,19 +2696,6 @@ class NewSpace : public Space {
void Print() override { to_space_.Print(); }
#endif
- // Iterates the active semispace to collect statistics.
- void CollectStatistics();
- // Reports previously collected statistics of the active semispace.
- void ReportStatistics();
- // Clears previously collected statistics.
- void ClearHistograms();
-
- // Record the allocation or promotion of a heap object. Note that we don't
- // record every single allocation, but only those that happen in the
- // to space during a scavenge GC.
- void RecordAllocation(HeapObject* obj);
- void RecordPromotion(HeapObject* obj);
-
// Return whether the operation succeeded.
bool CommitFromSpaceIfNeeded() {
if (from_space_.is_committed()) return true;
@@ -2757,9 +2711,6 @@ class NewSpace : public Space {
SemiSpace* active_space() { return &to_space_; }
- void PauseAllocationObservers() override;
- void ResumeAllocationObservers() override;
-
iterator begin() { return to_space_.begin(); }
iterator end() { return to_space_.end(); }
@@ -2769,16 +2720,12 @@ class NewSpace : public Space {
SemiSpace& to_space() { return to_space_; }
private:
- // Update allocation info to match the current to-space page.
- void UpdateAllocationInfo();
+ // Update linear allocation area to match the current to-space page.
+ void UpdateLinearAllocationArea();
base::Mutex mutex_;
- // Allocation pointer and limit for normal allocation and allocation during
- // mark-compact collection.
- AllocationInfo allocation_info_;
- Address top_on_previous_step_;
- // The top and the limit at the time of setting the allocation info.
+ // The top and the limit at the time of setting the linear allocation area.
// These values can be accessed by background tasks.
base::AtomicValue<Address> original_top_;
base::AtomicValue<Address> original_limit_;
@@ -2788,20 +2735,8 @@ class NewSpace : public Space {
SemiSpace from_space_;
VirtualMemory reservation_;
- HistogramInfo* allocated_histogram_;
- HistogramInfo* promoted_histogram_;
-
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
-
- // If we are doing inline allocation in steps, this method performs the 'step'
- // operation. top is the memory address of the bump pointer at the last
- // inline allocation (i.e. it determines the numbers of bytes actually
- // allocated since the last step.) new_top is the address of the bump pointer
- // where the next byte is going to be allocated from. top and new_top may be
- // different when we cross a page boundary or reset the space.
- void InlineAllocationStep(Address top, Address new_top, Address soon_object,
- size_t size);
- void StartNextInlineAllocationStep() override;
+ bool SupportsInlineAllocation() override { return true; }
friend class SemiSpaceIterator;
};
@@ -2832,7 +2767,8 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
MUST_USE_RESULT bool SweepAndRetryAllocation(int size_in_bytes) override;
- MUST_USE_RESULT bool SlowAllocateRaw(int size_in_bytes) override;
+ MUST_USE_RESULT bool SlowRefillLinearAllocationArea(
+ int size_in_bytes) override;
};
@@ -2986,13 +2922,14 @@ class LargeObjectSpace : public Space {
std::unique_ptr<ObjectIterator> GetObjectIterator() override;
+ base::Mutex* chunk_map_mutex() { return &chunk_map_mutex_; }
+
#ifdef VERIFY_HEAP
virtual void Verify();
#endif
#ifdef DEBUG
void Print() override;
- void ReportStatistics();
#endif
private:
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index 4613b705fa..724edf5721 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -58,7 +58,7 @@ void StoreBuffer::SetUp() {
if (!reservation.SetPermissions(reinterpret_cast<Address>(start_[0]),
kStoreBufferSize * kStoreBuffers,
- base::OS::MemoryPermission::kReadWrite)) {
+ PageAllocator::kReadWrite)) {
V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
current_ = 0;
@@ -105,10 +105,14 @@ void StoreBuffer::MoveEntriesToRememberedSet(int index) {
DCHECK_GE(index, 0);
DCHECK_LT(index, kStoreBuffers);
Address last_inserted_addr = nullptr;
+
+ // We are taking the chunk map mutex here because the page lookup of addr
+ // below may require us to check if addr is part of a large page.
+ base::LockGuard<base::Mutex> guard(heap_->lo_space()->chunk_map_mutex());
for (Address* current = start_[index]; current < lazy_top_[index];
current++) {
Address addr = *current;
- Page* page = Page::FromAnyPointerAddress(heap_, addr);
+ MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
if (IsDeletionAddress(addr)) {
last_inserted_addr = nullptr;
current++;
@@ -116,15 +120,15 @@ void StoreBuffer::MoveEntriesToRememberedSet(int index) {
DCHECK(!IsDeletionAddress(end));
addr = UnmarkDeletionAddress(addr);
if (end) {
- RememberedSet<OLD_TO_NEW>::RemoveRange(page, addr, end,
+ RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, addr, end,
SlotSet::PREFREE_EMPTY_BUCKETS);
} else {
- RememberedSet<OLD_TO_NEW>::Remove(page, addr);
+ RememberedSet<OLD_TO_NEW>::Remove(chunk, addr);
}
} else {
DCHECK(!IsDeletionAddress(addr));
if (addr != last_inserted_addr) {
- RememberedSet<OLD_TO_NEW>::Insert(page, addr);
+ RememberedSet<OLD_TO_NEW>::Insert(chunk, addr);
last_inserted_addr = addr;
}
}
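Aside: the new LockGuard in MoveEntriesToRememberedSet exists because resolving an arbitrary slot address may fall back to the large-object chunk map, a structure other threads can mutate concurrently. The toy below only illustrates that hold-the-map-mutex-across-the-scan shape; the map layout and names are invented.

#include <cstdint>
#include <iostream>
#include <map>
#include <mutex>
#include <utility>

using Address = uintptr_t;

// Toy large-object chunk map: base address -> (name, size). Lookups must
// happen under |mutex| because another thread may be adding/removing chunks.
struct ToyChunkMap {
  std::mutex mutex;
  std::map<Address, std::pair<const char*, size_t>> chunks;

  const char* Lookup(Address addr) {
    auto it = chunks.upper_bound(addr);
    if (it == chunks.begin()) return nullptr;
    --it;
    return addr < it->first + it->second.second ? it->second.first : nullptr;
  }
};

int main() {
  ToyChunkMap map;
  map.chunks[0x100000] = {"large-page-A", 0x80000};

  // Hold the mutex for the whole scan, mirroring the guard taken around the
  // store-buffer loop in the diff above.
  std::lock_guard<std::mutex> guard(map.mutex);
  for (Address slot : {Address{0x100040}, Address{0x200000}}) {
    const char* chunk = map.Lookup(slot);
    std::cout << std::hex << slot << " -> " << (chunk ? chunk : "not large")
              << "\n";
  }
  return 0;
}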
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index 75da76490e..a69abcc886 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -10,6 +10,7 @@
#include "src/base/platform/platform.h"
#include "src/cancelable-task.h"
#include "src/globals.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
@@ -167,14 +168,19 @@ class StoreBuffer {
class Task : public CancelableTask {
public:
Task(Isolate* isolate, StoreBuffer* store_buffer)
- : CancelableTask(isolate), store_buffer_(store_buffer) {}
+ : CancelableTask(isolate),
+ store_buffer_(store_buffer),
+ tracer_(isolate->heap()->tracer()) {}
virtual ~Task() {}
private:
void RunInternal() override {
+ TRACE_BACKGROUND_GC(tracer_,
+ GCTracer::BackgroundScope::BACKGROUND_STORE_BUFFER);
store_buffer_->ConcurrentlyProcessStoreBuffer();
}
StoreBuffer* store_buffer_;
+ GCTracer* tracer_;
DISALLOW_COPY_AND_ASSIGN(Task);
};
diff --git a/deps/v8/src/heap/stress-marking-observer.cc b/deps/v8/src/heap/stress-marking-observer.cc
new file mode 100644
index 0000000000..710282d573
--- /dev/null
+++ b/deps/v8/src/heap/stress-marking-observer.cc
@@ -0,0 +1,21 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/stress-marking-observer.h"
+
+namespace v8 {
+namespace internal {
+
+// TODO(majeski): meaningful step_size
+StressMarkingObserver::StressMarkingObserver(Heap& heap)
+ : AllocationObserver(64), heap_(heap) {}
+
+void StressMarkingObserver::Step(int bytes_allocated, Address soon_object,
+ size_t size) {
+ heap_.StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
+ kNoGCCallbackFlags);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/stress-marking-observer.h b/deps/v8/src/heap/stress-marking-observer.h
new file mode 100644
index 0000000000..b97c2b179c
--- /dev/null
+++ b/deps/v8/src/heap/stress-marking-observer.h
@@ -0,0 +1,26 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_STRESS_MARKING_OBSERVER_H_
+#define V8_HEAP_STRESS_MARKING_OBSERVER_H_
+
+#include "src/heap/heap.h"
+
+namespace v8 {
+namespace internal {
+
+class StressMarkingObserver : public AllocationObserver {
+ public:
+ explicit StressMarkingObserver(Heap& heap);
+
+ void Step(int bytes_allocated, Address soon_object, size_t size) override;
+
+ private:
+ Heap& heap_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif
diff --git a/deps/v8/src/heap/stress-scavenge-observer.cc b/deps/v8/src/heap/stress-scavenge-observer.cc
new file mode 100644
index 0000000000..c9f169ae45
--- /dev/null
+++ b/deps/v8/src/heap/stress-scavenge-observer.cc
@@ -0,0 +1,94 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/stress-scavenge-observer.h"
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/spaces.h"
+#include "src/isolate.h"
+
+namespace v8 {
+namespace internal {
+
+// TODO(majeski): meaningful step_size
+StressScavengeObserver::StressScavengeObserver(Heap& heap)
+ : AllocationObserver(64),
+ heap_(heap),
+ has_requested_gc_(false),
+ max_new_space_size_reached_(0.0) {
+ limit_percentage_ = NextLimit();
+
+ if (FLAG_trace_stress_scavenge && !FLAG_fuzzer_gc_analysis) {
+ heap_.isolate()->PrintWithTimestamp(
+ "[StressScavenge] %d%% is the new limit\n", limit_percentage_);
+ }
+}
+
+void StressScavengeObserver::Step(int bytes_allocated, Address soon_object,
+ size_t size) {
+ if (has_requested_gc_ || heap_.new_space()->Capacity() == 0) {
+ return;
+ }
+
+ double current_percent =
+ heap_.new_space()->Size() * 100.0 / heap_.new_space()->Capacity();
+
+ if (FLAG_trace_stress_scavenge) {
+ heap_.isolate()->PrintWithTimestamp(
+ "[Scavenge] %.2lf%% of the new space capacity reached\n",
+ current_percent);
+ }
+
+ if (FLAG_fuzzer_gc_analysis) {
+ max_new_space_size_reached_ =
+ std::max(max_new_space_size_reached_, current_percent);
+ return;
+ }
+
+ if (static_cast<int>(current_percent) >= limit_percentage_) {
+ if (FLAG_trace_stress_scavenge) {
+ heap_.isolate()->PrintWithTimestamp("[Scavenge] GC requested\n");
+ }
+
+ has_requested_gc_ = true;
+ heap_.isolate()->stack_guard()->RequestGC();
+ }
+}
+
+bool StressScavengeObserver::HasRequestedGC() const {
+ return has_requested_gc_;
+}
+
+void StressScavengeObserver::RequestedGCDone() {
+ double current_percent =
+ heap_.new_space()->Size() * 100.0 / heap_.new_space()->Capacity();
+ limit_percentage_ = NextLimit(static_cast<int>(current_percent));
+
+ if (FLAG_trace_stress_scavenge) {
+ heap_.isolate()->PrintWithTimestamp(
+ "[Scavenge] %.2lf%% of the new space capacity reached\n",
+ current_percent);
+ heap_.isolate()->PrintWithTimestamp("[Scavenge] %d%% is the new limit\n",
+ limit_percentage_);
+ }
+
+ has_requested_gc_ = false;
+}
+
+double StressScavengeObserver::MaxNewSpaceSizeReached() const {
+ return max_new_space_size_reached_;
+}
+
+int StressScavengeObserver::NextLimit(int min) {
+ int max = FLAG_stress_scavenge;
+ if (min >= max) {
+ return max;
+ }
+
+ return min + heap_.isolate()->fuzzer_rng()->NextInt(max - min + 1);
+}
+
+} // namespace internal
+} // namespace v8
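Aside: NextLimit above draws the next trigger percentage uniformly between the current fill level and the --stress_scavenge flag value. The snippet below reproduces only that draw with a plain Mersenne Twister; the flag value and RNG are stand-ins for the isolate's fuzzer RNG, not V8 API.

#include <iostream>
#include <random>

// Stand-in for StressScavengeObserver::NextLimit: |max| plays the role of
// FLAG_stress_scavenge, |min| the current new-space fill percentage.
int NextLimitSketch(std::mt19937& rng, int max, int min = 0) {
  if (min >= max) return max;
  std::uniform_int_distribution<int> dist(min, max);  // inclusive bounds
  return dist(rng);
}

int main() {
  std::mt19937 rng(42);
  const int kStressScavengePercent = 90;  // made-up flag value
  // Initial limit: anywhere in [0, 90].
  std::cout << NextLimitSketch(rng, kStressScavengePercent) << "\n";
  // After a requested GC at 40% fill: anywhere in [40, 90].
  std::cout << NextLimitSketch(rng, kStressScavengePercent, 40) << "\n";
  return 0;
}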
diff --git a/deps/v8/src/heap/stress-scavenge-observer.h b/deps/v8/src/heap/stress-scavenge-observer.h
new file mode 100644
index 0000000000..6f69afe4c5
--- /dev/null
+++ b/deps/v8/src/heap/stress-scavenge-observer.h
@@ -0,0 +1,39 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_STRESS_SCAVENGE_OBSERVER_H_
+#define V8_HEAP_STRESS_SCAVENGE_OBSERVER_H_
+
+#include "src/heap/heap.h"
+
+namespace v8 {
+namespace internal {
+
+class StressScavengeObserver : public AllocationObserver {
+ public:
+ explicit StressScavengeObserver(Heap& heap);
+
+ void Step(int bytes_allocated, Address soon_object, size_t size) override;
+
+ bool HasRequestedGC() const;
+ void RequestedGCDone();
+
+ // The maximum percent of the new space capacity reached. This is tracked
+ // when specifying --fuzzer-gc-analysis.
+ double MaxNewSpaceSizeReached() const;
+
+ private:
+ Heap& heap_;
+ int limit_percentage_;
+ bool has_requested_gc_;
+
+ double max_new_space_size_reached_;
+
+ int NextLimit(int min = 0);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 17375aad97..25ba0df8fd 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -5,6 +5,7 @@
#include "src/heap/sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/remembered-set.h"
#include "src/objects-inl.h"
@@ -68,21 +69,24 @@ class Sweeper::SweeperTask final : public CancelableTask {
sweeper_(sweeper),
pending_sweeper_tasks_(pending_sweeper_tasks),
num_sweeping_tasks_(num_sweeping_tasks),
- space_to_start_(space_to_start) {}
+ space_to_start_(space_to_start),
+ tracer_(isolate->heap()->tracer()) {}
virtual ~SweeperTask() {}
private:
void RunInternal() final {
- DCHECK_GE(space_to_start_, FIRST_SPACE);
+ TRACE_BACKGROUND_GC(tracer_,
+ GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
+ DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE);
DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
- const int offset = space_to_start_ - FIRST_SPACE;
- const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
+ const int offset = space_to_start_ - FIRST_PAGED_SPACE;
+ const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
for (int i = 0; i < num_spaces; i++) {
- const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
+ const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces);
// Do not sweep code space concurrently.
if (static_cast<AllocationSpace>(space_id) == CODE_SPACE) continue;
- DCHECK_GE(space_id, FIRST_SPACE);
+ DCHECK_GE(space_id, FIRST_PAGED_SPACE);
DCHECK_LE(space_id, LAST_PAGED_SPACE);
sweeper_->SweepSpaceFromTask(static_cast<AllocationSpace>(space_id));
}
@@ -94,6 +98,7 @@ class Sweeper::SweeperTask final : public CancelableTask {
base::Semaphore* const pending_sweeper_tasks_;
base::AtomicNumber<intptr_t>* const num_sweeping_tasks_;
AllocationSpace space_to_start_;
+ GCTracer* const tracer_;
DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};
@@ -127,6 +132,7 @@ class Sweeper::IncrementalSweeperTask final : public CancelableTask {
void Sweeper::StartSweeping() {
CHECK(!stop_sweeper_tasks_.Value());
sweeping_in_progress_ = true;
+ iterability_in_progress_ = true;
MajorNonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
@@ -144,7 +150,7 @@ void Sweeper::StartSweeperTasks() {
if (FLAG_concurrent_sweeping && sweeping_in_progress_ &&
!heap_->delay_sweeper_tasks_for_testing_) {
ForAllSweepingSpaces([this](AllocationSpace space) {
- if (space == NEW_SPACE) return;
+ DCHECK(IsValidSweepingSpace(space));
num_sweeping_tasks_.Increment(1);
SweeperTask* task = new SweeperTask(heap_->isolate(), this,
&pending_sweeper_tasks_semaphore_,
@@ -200,6 +206,8 @@ void Sweeper::AbortAndWaitForTasks() {
void Sweeper::EnsureCompleted() {
if (!sweeping_in_progress_) return;
+ EnsureIterabilityCompleted();
+
// If sweeping is not completed or not running at all, we try to complete it
// here.
ForAllSweepingSpaces(
@@ -207,24 +215,11 @@ void Sweeper::EnsureCompleted() {
AbortAndWaitForTasks();
- ForAllSweepingSpaces([this](AllocationSpace space) {
- if (space == NEW_SPACE) {
- swept_list_[NEW_SPACE].clear();
- }
- DCHECK(sweeping_list_[space].empty());
- });
+ ForAllSweepingSpaces(
+ [this](AllocationSpace space) { CHECK(sweeping_list_[space].empty()); });
sweeping_in_progress_ = false;
}
-void Sweeper::EnsureNewSpaceCompleted() {
- if (!sweeping_in_progress_) return;
- if (!FLAG_concurrent_sweeping || sweeping_in_progress()) {
- for (Page* p : *heap_->new_space()) {
- SweepOrWaitUntilSweepingCompleted(p);
- }
- }
-}
-
bool Sweeper::AreSweeperTasksRunning() {
return num_sweeping_tasks_.Value() != 0;
}
@@ -281,7 +276,7 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
CHECK_GT(free_end, free_start);
size_t size = static_cast<size_t>(free_end - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
- memset(free_start, 0xcc, size);
+ memset(free_start, 0xCC, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
@@ -320,7 +315,7 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
CHECK_GT(p->area_end(), free_start);
size_t size = static_cast<size_t>(p->area_end() - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
- memset(free_start, 0xcc, size);
+ memset(free_start, 0xCC, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
@@ -408,6 +403,7 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
// path. This check here avoids taking the lock first, avoiding deadlocks.
if (page->SweepingDone()) return 0;
+ DCHECK(IsValidSweepingSpace(identity));
int max_freed = 0;
{
base::LockGuard<base::Mutex> guard(page->mutex());
@@ -423,11 +419,7 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
- if (identity == NEW_SPACE) {
- RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
- } else {
- max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
- }
+ max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
DCHECK(page->SweepingDone());
// After finishing sweeping of a page we clean up its remembered set.
@@ -461,9 +453,9 @@ void Sweeper::ScheduleIncrementalSweepingTask() {
void Sweeper::AddPage(AllocationSpace space, Page* page,
Sweeper::AddPageMode mode) {
base::LockGuard<base::Mutex> guard(&mutex_);
+ DCHECK(IsValidSweepingSpace(space));
DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
if (mode == Sweeper::REGULAR) {
- DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());
PrepareToBeSweptPage(space, page);
} else {
// Page has been temporarily removed from the sweeper. Accounting already
@@ -475,17 +467,19 @@ void Sweeper::AddPage(AllocationSpace space, Page* page,
}
void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
- page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
DCHECK_GE(page->area_size(),
static_cast<size_t>(marking_state_->live_bytes(page)));
- if (space != NEW_SPACE) {
- heap_->paged_space(space)->IncreaseAllocatedBytes(
- marking_state_->live_bytes(page), page);
- }
+ DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());
+ page->ForAllFreeListCategories(
+ [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
+ page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
+ heap_->paged_space(space)->IncreaseAllocatedBytes(
+ marking_state_->live_bytes(page), page);
}
Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
base::LockGuard<base::Mutex> guard(&mutex_);
+ DCHECK(IsValidSweepingSpace(space));
Page* page = nullptr;
if (!sweeping_list_[space].empty()) {
page = sweeping_list_[space].front();
@@ -494,5 +488,94 @@ Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
return page;
}
+void Sweeper::EnsurePageIsIterable(Page* page) {
+ AllocationSpace space = page->owner()->identity();
+ if (IsValidSweepingSpace(space)) {
+ SweepOrWaitUntilSweepingCompleted(page);
+ } else {
+ DCHECK(IsValidIterabilitySpace(space));
+ EnsureIterabilityCompleted();
+ }
+}
+
+void Sweeper::EnsureIterabilityCompleted() {
+ if (!iterability_in_progress_) return;
+
+ if (FLAG_concurrent_sweeping && iterability_task_started_) {
+ if (heap_->isolate()->cancelable_task_manager()->TryAbort(
+ iterability_task_id_) != CancelableTaskManager::kTaskAborted) {
+ iterability_task_semaphore_.Wait();
+ }
+ iterability_task_started_ = false;
+ }
+
+ for (Page* page : iterability_list_) {
+ MakeIterable(page);
+ }
+ iterability_list_.clear();
+ iterability_in_progress_ = false;
+}
+
+class Sweeper::IterabilityTask final : public CancelableTask {
+ public:
+ IterabilityTask(Isolate* isolate, Sweeper* sweeper,
+ base::Semaphore* pending_iterability_task)
+ : CancelableTask(isolate),
+ sweeper_(sweeper),
+ pending_iterability_task_(pending_iterability_task),
+ tracer_(isolate->heap()->tracer()) {}
+
+ virtual ~IterabilityTask() {}
+
+ private:
+ void RunInternal() final {
+ TRACE_BACKGROUND_GC(tracer_,
+ GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
+ for (Page* page : sweeper_->iterability_list_) {
+ sweeper_->MakeIterable(page);
+ }
+ sweeper_->iterability_list_.clear();
+ pending_iterability_task_->Signal();
+ }
+
+ Sweeper* const sweeper_;
+ base::Semaphore* const pending_iterability_task_;
+ GCTracer* const tracer_;
+
+ DISALLOW_COPY_AND_ASSIGN(IterabilityTask);
+};
+
+void Sweeper::StartIterabilityTasks() {
+ if (!iterability_in_progress_) return;
+
+ DCHECK(!iterability_task_started_);
+ if (FLAG_concurrent_sweeping && !iterability_list_.empty()) {
+ IterabilityTask* task = new IterabilityTask(heap_->isolate(), this,
+ &iterability_task_semaphore_);
+ iterability_task_id_ = task->id();
+ iterability_task_started_ = true;
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ }
+}
+
+void Sweeper::AddPageForIterability(Page* page) {
+ DCHECK(sweeping_in_progress_);
+ DCHECK(iterability_in_progress_);
+ DCHECK(!iterability_task_started_);
+ DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
+ DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());
+
+ iterability_list_.push_back(page);
+ page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
+}
+
+void Sweeper::MakeIterable(Page* page) {
+ DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
+ const FreeSpaceTreatmentMode free_space_mode =
+ Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
+ RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
+}
+
} // namespace internal
} // namespace v8
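Aside: EnsureIterabilityCompleted above is a cancel-or-wait handshake: try to abort the helper task, wait on its semaphore if it already started, then drain whatever is left on the main thread. The sketch below approximates that protocol with an atomic claim flag and std::future in place of CancelableTaskManager and a semaphore, so it is a shape illustration only, not V8 code.

#include <atomic>
#include <future>
#include <iostream>
#include <vector>

// Toy version of the iterability handshake: the main thread and a background
// task race to claim the pending pages; whoever wins processes them, and the
// loser either returns immediately (the "aborted" task) or waits (the main
// thread, when the task got there first).
struct IterabilityWork {
  std::vector<int> pages{1, 2, 3};  // stand-ins for Page*
  std::atomic<bool> claimed{false};
  std::future<void> task;

  void StartIterabilityTask() {
    task = std::async(std::launch::async, [this] {
      if (claimed.exchange(true)) return;  // main already claimed: "aborted"
      for (int page : pages) std::cout << "task made page " << page << " iterable\n";
      pages.clear();
    });
  }

  void EnsureIterabilityCompleted() {
    if (claimed.exchange(true)) {
      task.wait();  // the task won the race: wait instead of aborting
    } else {
      for (int page : pages) std::cout << "main made page " << page << " iterable\n";
      pages.clear();
      if (task.valid()) task.wait();  // let the no-op task retire
    }
  }
};

int main() {
  IterabilityWork work;
  work.StartIterabilityTask();
  work.EnsureIterabilityCompleted();
  return 0;
}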
diff --git a/deps/v8/src/heap/sweeper.h b/deps/v8/src/heap/sweeper.h
index 9a8eef3115..6eee902bcc 100644
--- a/deps/v8/src/heap/sweeper.h
+++ b/deps/v8/src/heap/sweeper.h
@@ -23,6 +23,7 @@ enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
class Sweeper {
public:
+ typedef std::vector<Page*> IterabilityList;
typedef std::deque<Page*> SweepingList;
typedef std::vector<Page*> SweptList;
@@ -83,7 +84,10 @@ class Sweeper {
incremental_sweeper_pending_(false),
sweeping_in_progress_(false),
num_sweeping_tasks_(0),
- stop_sweeper_tasks_(false) {}
+ stop_sweeper_tasks_(false),
+ iterability_task_semaphore_(0),
+ iterability_in_progress_(false),
+ iterability_task_started_(false) {}
bool sweeping_in_progress() const { return sweeping_in_progress_; }
@@ -104,32 +108,38 @@ class Sweeper {
void StartSweeping();
void StartSweeperTasks();
void EnsureCompleted();
- void EnsureNewSpaceCompleted();
bool AreSweeperTasksRunning();
- void SweepOrWaitUntilSweepingCompleted(Page* page);
Page* GetSweptPageSafe(PagedSpace* space);
+ void EnsurePageIsIterable(Page* page);
+
+ void AddPageForIterability(Page* page);
+ void StartIterabilityTasks();
+ void EnsureIterabilityCompleted();
+
private:
class IncrementalSweeperTask;
+ class IterabilityTask;
class SweeperTask;
- static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
- static const int kMaxSweeperTasks = kAllocationSpaces;
+ static const int kNumberOfSweepingSpaces = LAST_PAGED_SPACE + 1;
+ static const int kMaxSweeperTasks = 3;
template <typename Callback>
- void ForAllSweepingSpaces(Callback callback) {
- for (int i = 0; i < kAllocationSpaces; i++) {
- callback(static_cast<AllocationSpace>(i));
- }
+ void ForAllSweepingSpaces(Callback callback) const {
+ callback(OLD_SPACE);
+ callback(CODE_SPACE);
+ callback(MAP_SPACE);
}
// Can only be called on the main thread when no tasks are running.
bool IsDoneSweeping() const {
- for (int i = 0; i < kAllocationSpaces; i++) {
- if (!sweeping_list_[i].empty()) return false;
- }
- return true;
+ bool is_done = true;
+ ForAllSweepingSpaces([this, &is_done](AllocationSpace space) {
+ if (!sweeping_list_[space].empty()) is_done = false;
+ });
+ return is_done;
}
void SweepSpaceFromTask(AllocationSpace identity);
@@ -144,14 +154,26 @@ class Sweeper {
void PrepareToBeSweptPage(AllocationSpace space, Page* page);
+ void SweepOrWaitUntilSweepingCompleted(Page* page);
+
+ void MakeIterable(Page* page);
+
+ bool IsValidIterabilitySpace(AllocationSpace space) {
+ return space == NEW_SPACE;
+ }
+
+ bool IsValidSweepingSpace(AllocationSpace space) {
+ return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
+ }
+
Heap* const heap_;
MajorNonAtomicMarkingState* marking_state_;
int num_tasks_;
- CancelableTaskManager::Id task_ids_[kMaxSweeperTasks];
+ CancelableTaskManager::Id task_ids_[kNumberOfSweepingSpaces];
base::Semaphore pending_sweeper_tasks_semaphore_;
base::Mutex mutex_;
- SweptList swept_list_[kAllocationSpaces];
- SweepingList sweeping_list_[kAllocationSpaces];
+ SweptList swept_list_[kNumberOfSweepingSpaces];
+ SweepingList sweeping_list_[kNumberOfSweepingSpaces];
bool incremental_sweeper_pending_;
bool sweeping_in_progress_;
// Counter is actively maintained by the concurrent tasks to avoid querying
@@ -159,6 +181,13 @@ class Sweeper {
base::AtomicNumber<intptr_t> num_sweeping_tasks_;
// Used by PauseOrCompleteScope to signal early bailout to tasks.
base::AtomicValue<bool> stop_sweeper_tasks_;
+
+ // Pages that are only made iterable but have their free lists ignored.
+ IterabilityList iterability_list_;
+ CancelableTaskManager::Id iterability_task_id_;
+ base::Semaphore iterability_task_semaphore_;
+ bool iterability_in_progress_;
+ bool iterability_task_started_;
};
} // namespace internal
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index ebc9f49dd9..368addd718 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -70,7 +70,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
@@ -153,7 +153,7 @@ void RelocInfo::WipeOut(Isolate* isolate) {
Memory::Address_at(pc_) = nullptr;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(isolate, pc_, host_,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_,
pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
@@ -261,25 +261,14 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
}
}
-Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- return target_address_at(pc, constant_pool);
-}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
-}
-
Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
}
void Assembler::deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code, Address target) {
- set_target_address_at(isolate, instruction_payload, code, target);
+ set_target_address_at(isolate, instruction_payload,
+ code ? code->constant_pool() : nullptr, target);
}
Displacement Assembler::disp_at(Label* L) {
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 99f52031ed..38508c7632 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -88,7 +88,7 @@ V8_INLINE uint64_t _xgetbv(unsigned int xcr) {
// directly because older assemblers do not include support for xgetbv and
// there is no easy way to conditionally compile based on the assembler
// used.
- __asm__ volatile(".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c"(xcr));
+ __asm__ volatile(".byte 0x0F, 0x01, 0xD0" : "=a"(eax), "=d"(edx) : "c"(xcr));
return static_cast<uint64_t>(eax) | (static_cast<uint64_t>(edx) << 32);
}
@@ -398,7 +398,7 @@ bool Assembler::IsNop(Address addr) {
Address a = addr;
while (*a == 0x66) a++;
if (*a == 0x90) return true;
- if (a[0] == 0xf && a[1] == 0x1f) return true;
+ if (a[0] == 0xF && a[1] == 0x1F) return true;
return false;
}
@@ -415,28 +415,28 @@ void Assembler::Nop(int bytes) {
EMIT(0x90);
return;
case 3:
- EMIT(0xf);
- EMIT(0x1f);
+ EMIT(0xF);
+ EMIT(0x1F);
EMIT(0);
return;
case 4:
- EMIT(0xf);
- EMIT(0x1f);
+ EMIT(0xF);
+ EMIT(0x1F);
EMIT(0x40);
EMIT(0);
return;
case 6:
EMIT(0x66);
case 5:
- EMIT(0xf);
- EMIT(0x1f);
+ EMIT(0xF);
+ EMIT(0x1F);
EMIT(0x44);
EMIT(0);
EMIT(0);
return;
case 7:
- EMIT(0xf);
- EMIT(0x1f);
+ EMIT(0xF);
+ EMIT(0x1F);
EMIT(0x80);
EMIT(0);
EMIT(0);
@@ -454,8 +454,8 @@ void Assembler::Nop(int bytes) {
EMIT(0x66);
bytes--;
case 8:
- EMIT(0xf);
- EMIT(0x1f);
+ EMIT(0xF);
+ EMIT(0x1F);
EMIT(0x84);
EMIT(0);
EMIT(0);
@@ -507,7 +507,7 @@ void Assembler::popfd() {
void Assembler::push(const Immediate& x) {
EnsureSpace ensure_space(this);
if (x.is_int8()) {
- EMIT(0x6a);
+ EMIT(0x6A);
EMIT(x.immediate());
} else {
EMIT(0x68);
@@ -609,7 +609,7 @@ void Assembler::mov_w(const Operand& dst, const Immediate& src) {
EMIT(0x66);
EMIT(0xC7);
emit_operand(eax, dst);
- EMIT(static_cast<int8_t>(src.immediate() & 0xff));
+ EMIT(static_cast<int8_t>(src.immediate() & 0xFF));
EMIT(static_cast<int8_t>(src.immediate() >> 8));
}
@@ -796,6 +796,13 @@ void Assembler::cmpxchg_w(const Operand& dst, Register src) {
emit_operand(src, dst);
}
+void Assembler::lfence() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xAE);
+ EMIT(0xE8);
+}
+
void Assembler::adc(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
emit_arith(2, Operand(dst), Immediate(imm32));
@@ -1378,7 +1385,7 @@ void Assembler::test_w(Register reg, Immediate imm16) {
} else {
EMIT(0x66);
EMIT(0xF7);
- EMIT(0xc0 | reg.code());
+ EMIT(0xC0 | reg.code());
emit_w(imm16);
}
}
@@ -2426,6 +2433,13 @@ void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
+void Assembler::haddps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x7C);
+ emit_sse_operand(dst, src);
+}
void Assembler::andpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
@@ -2828,6 +2842,17 @@ void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
EMIT(offset);
}
+void Assembler::insertps(XMMRegister dst, const Operand& src, int8_t offset) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x21);
+ emit_sse_operand(dst, src);
+ EMIT(offset);
+}
+
void Assembler::pinsrb(XMMRegister dst, const Operand& src, int8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -2908,8 +2933,8 @@ void Assembler::sqrtss(XMMRegister dst, const Operand& src) {
void Assembler::ucomiss(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
- EMIT(0x0f);
- EMIT(0x2e);
+ EMIT(0x0F);
+ EMIT(0x2E);
emit_sse_operand(dst, src);
}
@@ -2982,6 +3007,13 @@ void Assembler::vcmpps(XMMRegister dst, XMMRegister src1, const Operand& src2,
EMIT(cmp);
}
+void Assembler::vshufps(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ byte imm8) {
+ DCHECK(is_uint8(imm8));
+ vps(0xC6, dst, src1, src2);
+ EMIT(imm8);
+}
+
void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8) {
XMMRegister iop = XMMRegister::from_code(6);
vinstr(0x71, iop, dst, Operand(src), k66, k0F, kWIG);
@@ -3043,6 +3075,12 @@ void Assembler::vpextrd(const Operand& dst, XMMRegister src, int8_t offset) {
EMIT(offset);
}
+void Assembler::vinsertps(XMMRegister dst, XMMRegister src1,
+ const Operand& src2, int8_t offset) {
+ vinstr(0x21, dst, src1, src2, k66, k0F3A, kWIG);
+ EMIT(offset);
+}
+
void Assembler::vpinsrb(XMMRegister dst, XMMRegister src1, const Operand& src2,
int8_t offset) {
vinstr(0x20, dst, src1, src2, k66, k0F3A, kWIG);
@@ -3186,12 +3224,12 @@ void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
void Assembler::emit_vex_prefix(XMMRegister vreg, VectorLength l, SIMDPrefix pp,
LeadingOpcode mm, VexW w) {
if (mm != k0F || w != kW0) {
- EMIT(0xc4);
+ EMIT(0xC4);
// Change RXB from "110" to "111" to align with gdb disassembler.
- EMIT(0xe0 | mm);
- EMIT(w | ((~vreg.code() & 0xf) << 3) | l | pp);
+ EMIT(0xE0 | mm);
+ EMIT(w | ((~vreg.code() & 0xF) << 3) | l | pp);
} else {
- EMIT(0xc5);
+ EMIT(0xC5);
EMIT(((~vreg.code()) << 3) | l | pp);
}
}
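The new lfence() above emits the three-byte LFENCE encoding 0F AE E8, which the disassembler change further down recognizes via (data[2] & 0xF8) == 0xE8 (opcode 0F AE with ModRM /5 and a register operand, i.e. bytes E8 through EF). A standalone sketch of the same byte emission, using a plain byte buffer in place of the assembler's EMIT macro:

#include <cstdint>
#include <cstdio>
#include <vector>

// Stand-in for the assembler's EMIT(): append one byte to the code buffer.
void Emit(std::vector<uint8_t>* buf, uint8_t b) { buf->push_back(b); }

// Same byte sequence the new Assembler::lfence() emits.
void EmitLfence(std::vector<uint8_t>* buf) {
  Emit(buf, 0x0F);
  Emit(buf, 0xAE);
  Emit(buf, 0xE8);
}

int main() {
  std::vector<uint8_t> code;
  EmitLfence(&code);
  for (uint8_t b : code) std::printf("%02X ", b);  // Prints: 0F AE E8
  std::printf("\n");
  return 0;
}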
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 83e30df4f5..d57e3bee71 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -113,6 +113,7 @@ GENERAL_REGISTERS(DEFINE_REGISTER)
#undef DEFINE_REGISTER
constexpr Register no_reg = Register::no_reg();
+constexpr bool kPadArguments = false;
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
@@ -530,10 +531,6 @@ class Assembler : public AssemblerBase {
inline static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- static inline Address target_address_at(Address pc, Code* code);
- static inline void set_target_address_at(
- Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -683,6 +680,9 @@ class Assembler : public AssemblerBase {
void cmpxchg_b(const Operand& dst, Register src);
void cmpxchg_w(const Operand& dst, Register src);
+ // Memory Fence
+ void lfence();
+
// Arithmetics
void adc(Register dst, int32_t imm32);
void adc(Register dst, const Operand& src);
@@ -1004,6 +1004,8 @@ class Assembler : public AssemblerBase {
void rcpps(XMMRegister dst, XMMRegister src) { rcpps(dst, Operand(src)); }
void rsqrtps(XMMRegister dst, const Operand& src);
void rsqrtps(XMMRegister dst, XMMRegister src) { rsqrtps(dst, Operand(src)); }
+ void haddps(XMMRegister dst, const Operand& src);
+ void haddps(XMMRegister dst, XMMRegister src) { haddps(dst, Operand(src)); }
void minps(XMMRegister dst, const Operand& src);
void minps(XMMRegister dst, XMMRegister src) { minps(dst, Operand(src)); }
@@ -1149,6 +1151,10 @@ class Assembler : public AssemblerBase {
}
void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
+ void insertps(XMMRegister dst, XMMRegister src, int8_t offset) {
+ insertps(dst, Operand(src), offset);
+ }
+ void insertps(XMMRegister dst, const Operand& src, int8_t offset);
void pinsrb(XMMRegister dst, Register src, int8_t offset) {
pinsrb(dst, Operand(src), offset);
}
@@ -1397,6 +1403,14 @@ class Assembler : public AssemblerBase {
void vrsqrtps(XMMRegister dst, const Operand& src) {
vinstr(0x52, dst, xmm0, src, kNone, k0F, kWIG);
}
+ void vmovaps(XMMRegister dst, XMMRegister src) {
+ vps(0x28, dst, xmm0, Operand(src));
+ }
+ void vshufps(XMMRegister dst, XMMRegister src1, XMMRegister src2, byte imm8) {
+ vshufps(dst, src1, Operand(src2), imm8);
+ }
+ void vshufps(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ byte imm8);
void vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8);
void vpslld(XMMRegister dst, XMMRegister src, int8_t imm8);
@@ -1427,6 +1441,12 @@ class Assembler : public AssemblerBase {
}
void vpextrd(const Operand& dst, XMMRegister src, int8_t offset);
+ void vinsertps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ int8_t offset) {
+ vinsertps(dst, src1, Operand(src2), offset);
+ }
+ void vinsertps(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ int8_t offset);
void vpinsrb(XMMRegister dst, XMMRegister src1, Register src2,
int8_t offset) {
vpinsrb(dst, src1, Operand(src2), offset);
@@ -1459,6 +1479,12 @@ class Assembler : public AssemblerBase {
vinstr(0x5B, dst, xmm0, src, kF3, k0F, kWIG);
}
+ void vmovdqu(XMMRegister dst, const Operand& src) {
+ vinstr(0x6F, dst, xmm0, src, kF3, k0F, kWIG);
+ }
+ void vmovdqu(const Operand& dst, XMMRegister src) {
+ vinstr(0x7F, src, xmm0, dst, kF3, k0F, kWIG);
+ }
void vmovd(XMMRegister dst, Register src) { vmovd(dst, Operand(src)); }
void vmovd(XMMRegister dst, const Operand& src) {
vinstr(0x6E, dst, xmm0, src, k66, k0F, kWIG);
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 8ca0b5989f..697539713a 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -486,12 +486,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// pop the faked function when we return. Notice that we cannot store a
// reference to the trampoline code directly in this stub, because the
// builtin stubs may not have been generated yet.
- if (type() == StackFrame::CONSTRUCT_ENTRY) {
- __ Call(BUILTIN_CODE(isolate(), JSConstructEntryTrampoline),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(BUILTIN_CODE(isolate(), JSEntryTrampoline), RelocInfo::CODE_TARGET);
- }
+ __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@@ -588,7 +583,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -632,7 +627,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
Handle<Map> allocation_site_map =
masm->isolate()->factory()->allocation_site_map();
__ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
- __ Assert(equal, kExpectedAllocationSite);
+ __ Assert(equal, AbortReason::kExpectedAllocationSite);
}
// Save the resulting elements kind in type info. We can't just store r3
@@ -657,7 +652,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -731,9 +726,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(not_zero, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(ecx, MAP_TYPE, ecx);
- __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
// We should either have undefined in ebx or a valid AllocationSite
__ AssertUndefinedOrAllocationSite(ebx);
@@ -828,9 +823,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(not_zero, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(ecx, MAP_TYPE, ecx);
- __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
// Figure out the right elements kind
@@ -847,8 +842,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ cmp(ecx, Immediate(PACKED_ELEMENTS));
__ j(equal, &done);
__ cmp(ecx, Immediate(HOLEY_ELEMENTS));
- __ Assert(equal,
- kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ Assert(
+ equal,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
@@ -959,7 +955,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// previous handle scope.
__ mov(Operand::StaticVariable(next_address), ebx);
__ sub(Operand::StaticVariable(level_address), Immediate(1));
- __ Assert(above_equal, kInvalidHandleScopeLevel);
+ __ Assert(above_equal, AbortReason::kInvalidHandleScopeLevel);
__ cmp(edi, Operand::StaticVariable(limit_address));
__ j(not_equal, &delete_allocated_handles);
@@ -1007,7 +1003,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ cmp(return_value, isolate->factory()->null_value());
__ j(equal, &ok, Label::kNear);
- __ Abort(kAPICallReturnedInvalidObject);
+ __ Abort(AbortReason::kAPICallReturnedInvalidObject);
__ bind(&ok);
#endif
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index a66334e3a0..8bd6b5f30c 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -16,8 +16,7 @@ namespace internal {
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -39,8 +38,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
@@ -133,8 +131,7 @@ class LabelConverter {
MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -452,8 +449,7 @@ MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
return FUNCTION_CAST<MemMoveFunction>(buffer);
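Both generated-code builders above follow the same lifecycle: allocate a read-write page, assemble into it, flush the instruction cache, then flip the page to read-execute with SetPermissions(buffer, allocated, PageAllocator::kReadExecute) before handing out a function pointer. A standalone POSIX sketch of that shape (not V8 code; assumes an x86 host and uses mmap/mprotect directly):

#include <sys/mman.h>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const size_t kSize = 4096;
  // 1. Allocate a read-write page (the role of AllocatePage above).
  void* buf = mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (buf == MAP_FAILED) return 1;

  // 2. Emit machine code into it: x86 "mov eax, 42; ret".
  const uint8_t code[] = {0xB8, 0x2A, 0x00, 0x00, 0x00, 0xC3};
  std::memcpy(buf, code, sizeof(code));

  // 3. Flip the page to read+execute before running it, matching
  //    SetPermissions(buffer, allocated, PageAllocator::kReadExecute).
  if (mprotect(buf, kSize, PROT_READ | PROT_EXEC) != 0) return 1;

  int result = reinterpret_cast<int (*)()>(buf)();
  std::printf("%d\n", result);  // Prints 42.
  munmap(buf, kSize);
  return 0;
}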
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 39c6ff0d5c..6ce62e93bb 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -87,14 +87,11 @@ static const ByteMnemonic short_immediate_instr[] = {
// register stalls. They are included for completeness and because the cmp
// variant is used by the RecordWrite stub. Because it does not update the
// register it is not subject to partial register stalls.
-static ByteMnemonic byte_immediate_instr[] = {
- {0x0c, "or", UNSET_OP_ORDER},
- {0x24, "and", UNSET_OP_ORDER},
- {0x34, "xor", UNSET_OP_ORDER},
- {0x3c, "cmp", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
+static ByteMnemonic byte_immediate_instr[] = {{0x0C, "or", UNSET_OP_ORDER},
+ {0x24, "and", UNSET_OP_ORDER},
+ {0x34, "xor", UNSET_OP_ORDER},
+ {0x3C, "cmp", UNSET_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}};
static const char* const jump_conditional_mnem[] = {
/*0*/ "jo", "jno", "jc", "jnc",
@@ -251,7 +248,7 @@ class DisassemblerIA32 {
private:
const NameConverter& converter_;
- byte vex_byte0_; // 0xc4 or 0xc5
+ byte vex_byte0_; // 0xC4 or 0xC5
byte vex_byte1_;
byte vex_byte2_; // only for 3 bytes vex prefix
InstructionTable* instruction_table_;
@@ -282,59 +279,59 @@ class DisassemblerIA32 {
};
bool vex_128() {
- DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
- byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+ DCHECK(vex_byte0_ == 0xC4 || vex_byte0_ == 0xC5);
+ byte checked = vex_byte0_ == 0xC4 ? vex_byte2_ : vex_byte1_;
return (checked & 4) == 0;
}
bool vex_none() {
- DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
- byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+ DCHECK(vex_byte0_ == 0xC4 || vex_byte0_ == 0xC5);
+ byte checked = vex_byte0_ == 0xC4 ? vex_byte2_ : vex_byte1_;
return (checked & 3) == 0;
}
bool vex_66() {
- DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
- byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+ DCHECK(vex_byte0_ == 0xC4 || vex_byte0_ == 0xC5);
+ byte checked = vex_byte0_ == 0xC4 ? vex_byte2_ : vex_byte1_;
return (checked & 3) == 1;
}
bool vex_f3() {
- DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
- byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+ DCHECK(vex_byte0_ == 0xC4 || vex_byte0_ == 0xC5);
+ byte checked = vex_byte0_ == 0xC4 ? vex_byte2_ : vex_byte1_;
return (checked & 3) == 2;
}
bool vex_f2() {
- DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
- byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+ DCHECK(vex_byte0_ == 0xC4 || vex_byte0_ == 0xC5);
+ byte checked = vex_byte0_ == 0xC4 ? vex_byte2_ : vex_byte1_;
return (checked & 3) == 3;
}
bool vex_w() {
- if (vex_byte0_ == 0xc5) return false;
+ if (vex_byte0_ == 0xC5) return false;
return (vex_byte2_ & 0x80) != 0;
}
bool vex_0f() {
- if (vex_byte0_ == 0xc5) return true;
+ if (vex_byte0_ == 0xC5) return true;
return (vex_byte1_ & 3) == 1;
}
bool vex_0f38() {
- if (vex_byte0_ == 0xc5) return false;
+ if (vex_byte0_ == 0xC5) return false;
return (vex_byte1_ & 3) == 2;
}
bool vex_0f3a() {
- if (vex_byte0_ == 0xc5) return false;
+ if (vex_byte0_ == 0xC5) return false;
return (vex_byte1_ & 3) == 3;
}
int vex_vreg() {
- DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
- byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
- return ~(checked >> 3) & 0xf;
+ DCHECK(vex_byte0_ == 0xC4 || vex_byte0_ == 0xC5);
+ byte checked = vex_byte0_ == 0xC4 ? vex_byte2_ : vex_byte1_;
+ return ~(checked >> 3) & 0xF;
}
char float_size_code() { return "sd"[vex_w()]; }
@@ -743,62 +740,62 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xa9:
+ case 0xA9:
AppendToBuffer("vfmadd213s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xb9:
+ case 0xB9:
AppendToBuffer("vfmadd231s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x9b:
+ case 0x9B:
AppendToBuffer("vfmsub132s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xab:
+ case 0xAB:
AppendToBuffer("vfmsub213s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xbb:
+ case 0xBB:
AppendToBuffer("vfmsub231s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x9d:
+ case 0x9D:
AppendToBuffer("vfnmadd132s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xad:
+ case 0xAD:
AppendToBuffer("vfnmadd213s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xbd:
+ case 0xBD:
AppendToBuffer("vfnmadd231s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x9f:
+ case 0x9F:
AppendToBuffer("vfnmsub132s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xaf:
+ case 0xAF:
AppendToBuffer("vfnmsub213s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xbf:
+ case 0xBF:
AppendToBuffer("vfnmsub231s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xf7:
+ case 0xF7:
AppendToBuffer("shlx %s,", NameOfCPURegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfCPURegister(vvvv));
@@ -850,6 +847,13 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
current++;
break;
+ case 0x21:
+ AppendToBuffer("vinsertps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ current++;
+ break;
case 0x22:
AppendToBuffer("vpinsrd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -874,22 +878,22 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5c:
+ case 0x5C:
AppendToBuffer("vsubsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5d:
+ case 0x5D:
AppendToBuffer("vminsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5e:
+ case 0x5E:
AppendToBuffer("vdivsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5f:
+ case 0x5F:
AppendToBuffer("vmaxsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
@@ -917,30 +921,39 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5b:
+ case 0x5B:
AppendToBuffer("vcvttps2dq %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
- case 0x5c:
+ case 0x5C:
AppendToBuffer("vsubss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5d:
+ case 0x5D:
AppendToBuffer("vminss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5e:
+ case 0x5E:
AppendToBuffer("vdivss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5f:
+ case 0x5F:
AppendToBuffer("vmaxss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x6f:
+ AppendToBuffer("vmovdqu %s,", NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
+ break;
+ case 0x7f:
+ AppendToBuffer("vmovdqu ");
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
default:
UnimplementedInstruction();
}
@@ -949,22 +962,22 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
const char* mnem = "?";
switch (opcode) {
- case 0xf2:
+ case 0xF2:
AppendToBuffer("andn %s,%s,", NameOfCPURegister(regop),
NameOfCPURegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0xf5:
+ case 0xF5:
AppendToBuffer("bzhi %s,", NameOfCPURegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfCPURegister(vvvv));
break;
- case 0xf7:
+ case 0xF7:
AppendToBuffer("bextr %s,", NameOfCPURegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfCPURegister(vvvv));
break;
- case 0xf3:
+ case 0xF3:
switch (regop) {
case 1:
mnem = "blsr";
@@ -989,17 +1002,17 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
- case 0xf5:
+ case 0xF5:
AppendToBuffer("pdep %s,%s,", NameOfCPURegister(regop),
NameOfCPURegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0xf6:
+ case 0xF6:
AppendToBuffer("mulx %s,%s,", NameOfCPURegister(regop),
NameOfCPURegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0xf7:
+ case 0xF7:
AppendToBuffer("shrx %s,", NameOfCPURegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfCPURegister(vvvv));
@@ -1011,12 +1024,12 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
- case 0xf5:
+ case 0xF5:
AppendToBuffer("pext %s,%s,", NameOfCPURegister(regop),
NameOfCPURegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0xf7:
+ case 0xF7:
AppendToBuffer("sarx %s,", NameOfCPURegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfCPURegister(vvvv));
@@ -1028,10 +1041,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
- case 0xf0:
+ case 0xF0:
AppendToBuffer("rorx %s,", NameOfCPURegister(regop));
current += PrintRightOperand(current);
- AppendToBuffer(",%d", *current & 0x1f);
+ AppendToBuffer(",%d", *current & 0x1F);
current += 1;
break;
default:
@@ -1041,6 +1054,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x28:
+ AppendToBuffer("vmovaps %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x52:
AppendToBuffer("vrsqrtps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
@@ -1103,6 +1120,13 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
current++;
break;
}
+ case 0xC6:
+ AppendToBuffer("vshufps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(", %d", (*current) & 3);
+ current += 1;
+ break;
default:
UnimplementedInstruction();
}
@@ -1451,12 +1475,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else if (*data == 0x2E /*cs*/) {
branch_hint = "predicted not taken";
data++;
- } else if (*data == 0xC4 && *(data + 1) >= 0xc0) {
+ } else if (*data == 0xC4 && *(data + 1) >= 0xC0) {
vex_byte0_ = *data;
vex_byte1_ = *(data + 1);
vex_byte2_ = *(data + 2);
data += 3;
- } else if (*data == 0xC5 && *(data + 1) >= 0xc0) {
+ } else if (*data == 0xC5 && *(data + 1) >= 0xC0) {
vex_byte0_ = *data;
vex_byte1_ = *(data + 1);
data += 2;
@@ -1628,7 +1652,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("%s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
}
- } else if (f0byte == 0x2e) {
+ } else if (f0byte == 0x2E) {
data += 2;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
@@ -1732,6 +1756,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop));
data += PrintRightOperand(data);
+ } else if (f0byte == 0xAE && (data[2] & 0xF8) == 0xE8) {
+ AppendToBuffer("lfence");
+ data += 3;
} else {
UnimplementedInstruction();
}
@@ -1825,7 +1852,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0x66: // prefix
while (*data == 0x66) data++;
- if (*data == 0xf && data[1] == 0x1f) {
+ if (*data == 0xF && data[1] == 0x1F) {
AppendToBuffer("nop"); // 0x66 prefix
} else if (*data == 0x39) {
data++;
@@ -1964,6 +1991,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += PrintRightOperand(data);
AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
data++;
+ } else if (*data == 0x21) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("insertps %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ data++;
} else if (*data == 0x22) {
data++;
int mod, regop, rm;
@@ -2261,6 +2296,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0x5F:
mnem = "maxsd";
break;
+ case 0x7C:
+ mnem = "haddps";
+ break;
}
data += 3;
int mod, regop, rm;
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index f0f9ec0a30..9edad9a44c 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -44,8 +44,6 @@ const Register LoadDescriptor::SlotRegister() { return eax; }
const Register LoadWithVectorDescriptor::VectorRegister() { return ebx; }
-const Register LoadICProtoArrayDescriptor::HandlerRegister() { return edi; }
-
const Register StoreDescriptor::ReceiverRegister() { return edx; }
const Register StoreDescriptor::NameRegister() { return ecx; }
const Register StoreDescriptor::ValueRegister() { return eax; }
@@ -204,6 +202,11 @@ void TransitionElementsKindDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
+void AbortJSDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 850424293a..ebc8b39ab9 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -489,29 +489,29 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(equal, kOperandIsNotASmi);
+ Check(equal, AbortReason::kOperandIsNotASmi);
}
}
void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotAFixedArray);
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotAFixedArray);
Push(object);
CmpObjectType(object, FIXED_ARRAY_TYPE, object);
Pop(object);
- Check(equal, kOperandIsNotAFixedArray);
+ Check(equal, AbortReason::kOperandIsNotAFixedArray);
}
}
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotAFunction);
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
Push(object);
CmpObjectType(object, JS_FUNCTION_TYPE, object);
Pop(object);
- Check(equal, kOperandIsNotAFunction);
+ Check(equal, AbortReason::kOperandIsNotAFunction);
}
}
@@ -519,11 +519,11 @@ void MacroAssembler::AssertFunction(Register object) {
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotABoundFunction);
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
Push(object);
CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
Pop(object);
- Check(equal, kOperandIsNotABoundFunction);
+ Check(equal, AbortReason::kOperandIsNotABoundFunction);
}
}
@@ -531,7 +531,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
test(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotAGeneratorObject);
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
{
Push(object);
@@ -552,7 +552,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
Pop(object);
}
- Check(equal, kOperandIsNotAGeneratorObject);
+ Check(equal, AbortReason::kOperandIsNotAGeneratorObject);
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
@@ -563,7 +563,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
j(equal, &done_checking);
cmp(FieldOperand(object, 0),
Immediate(isolate()->factory()->allocation_site_map()));
- Assert(equal, kExpectedUndefinedOrCell);
+ Assert(equal, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
}
@@ -572,7 +572,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmi);
+ Check(not_equal, AbortReason::kOperandIsASmi);
}
}
@@ -598,7 +598,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
}
if (emit_debug_code()) {
cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
- Check(not_equal, kCodeObjectNotProperlyPatched);
+ Check(not_equal, AbortReason::kCodeObjectNotProperlyPatched);
}
}
@@ -606,7 +606,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(type)));
- Check(equal, kStackFrameTypesMustMatch);
+ Check(equal, AbortReason::kStackFrameTypesMustMatch);
}
leave();
}
@@ -738,7 +738,8 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
isolate());
mov(esi, Operand::StaticVariable(context_address));
#ifdef DEBUG
- mov(Operand::StaticVariable(context_address), Immediate(0));
+ mov(Operand::StaticVariable(context_address),
+ Immediate(Context::kInvalidContext));
#endif
// Clear the top frame.
@@ -757,9 +758,11 @@ void MacroAssembler::LeaveApiExitFrame() {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ push(Immediate(0)); // Padding.
+
// Link the current handler as the next handler.
ExternalReference handler_address(IsolateAddressId::kHandlerAddress,
isolate());
@@ -891,7 +894,7 @@ void TurboAssembler::PrepareForTailCall(
if (FLAG_debug_code) {
cmp(esp, new_sp_reg);
- Check(below, kStackAccessBelowStackPointer);
+ Check(below, AbortReason::kStackAccessBelowStackPointer);
}
// Copy return address from caller's frame to current frame's return address
@@ -1447,16 +1450,15 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
}
}
-
-void TurboAssembler::Assert(Condition cc, BailoutReason reason) {
+void TurboAssembler::Assert(Condition cc, AbortReason reason) {
if (emit_debug_code()) Check(cc, reason);
}
-void TurboAssembler::AssertUnreachable(BailoutReason reason) {
+void TurboAssembler::AssertUnreachable(AbortReason reason) {
if (emit_debug_code()) Abort(reason);
}
-void TurboAssembler::Check(Condition cc, BailoutReason reason) {
+void TurboAssembler::Check(Condition cc, AbortReason reason) {
Label L;
j(cc, &L);
Abort(reason);
@@ -1478,9 +1480,9 @@ void TurboAssembler::CheckStackAlignment() {
}
}
-void TurboAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(AbortReason reason) {
#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
+ const char* msg = GetAbortReason(reason);
if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 342281d0b3..6242333847 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -73,18 +73,18 @@ class TurboAssembler : public Assembler {
void LeaveFrame(StackFrame::Type type);
// Print a message to stdout and abort execution.
- void Abort(BailoutReason reason);
+ void Abort(AbortReason reason);
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, BailoutReason reason);
+ void Assert(Condition cc, AbortReason reason);
// Like Assert(), but without condition.
// Use --debug_code to enable.
- void AssertUnreachable(BailoutReason reason);
+ void AssertUnreachable(AbortReason reason);
// Like Assert(), but always enabled.
- void Check(Condition cc, BailoutReason reason);
+ void Check(Condition cc, AbortReason reason);
// Check that the stack is aligned.
void CheckStackAlignment();
@@ -214,6 +214,8 @@ class TurboAssembler : public Assembler {
} \
}
+ AVX_OP2_WITH_TYPE(Movdqu, movdqu, XMMRegister, const Operand&)
+ AVX_OP2_WITH_TYPE(Movdqu, movdqu, const Operand&, XMMRegister)
AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, Register)
AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, const Operand&)
AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister)
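The two new Movdqu lines rely on the AVX_OP2_WITH_TYPE helper whose closing braces are visible at the top of this hunk: it defines a TurboAssembler method that uses the VEX-encoded form when AVX is available and the plain SSE form otherwise. Reconstructed from the visible macro tail, the first invocation expands to roughly the following sketch (approximate, not a verbatim expansion):

// Approximate expansion of
// AVX_OP2_WITH_TYPE(Movdqu, movdqu, XMMRegister, const Operand&):
void Movdqu(XMMRegister dst, const Operand& src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vmovdqu(dst, src);  // VEX-encoded form added to assembler-ia32.h above.
  } else {
    movdqu(dst, src);   // Plain SSE2 encoding.
  }
}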
diff --git a/deps/v8/src/ia32/simulator-ia32.h b/deps/v8/src/ia32/simulator-ia32.h
index 076bde83e6..a55c1fefb8 100644
--- a/deps/v8/src/ia32/simulator-ia32.h
+++ b/deps/v8/src/ia32/simulator-ia32.h
@@ -5,50 +5,6 @@
#ifndef V8_IA32_SIMULATOR_IA32_H_
#define V8_IA32_SIMULATOR_IA32_H_
-#include "src/allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// Since there is no simulator for the ia32 architecture the only thing we can
-// do is to call the entry directly.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-
-typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, int, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address should
-// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
-
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on ia32 uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
- uintptr_t c_limit) {
- USE(isolate);
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
- USE(isolate);
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
- USE(isolate);
- }
-};
-
-} // namespace internal
-} // namespace v8
+// Since there is no simulator for the ia32 architecture this file is empty.
#endif // V8_IA32_SIMULATOR_IA32_H_
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index c4852d860d..dfd88862bd 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -20,6 +20,41 @@ using compiler::Node;
//////////////////// Private helpers.
+// Loads the dataX field (data1..data3) from the DataHandler object.
+Node* AccessorAssembler::LoadHandlerDataField(Node* handler, int data_index) {
+#ifdef DEBUG
+ Node* handler_map = LoadMap(handler);
+ Node* instance_type = LoadMapInstanceType(handler_map);
+#endif
+ CSA_ASSERT(this,
+ Word32Or(InstanceTypeEqual(instance_type, LOAD_HANDLER_TYPE),
+ InstanceTypeEqual(instance_type, STORE_HANDLER_TYPE)));
+ int offset = 0;
+ int minimum_size = 0;
+ switch (data_index) {
+ case 1:
+ offset = DataHandler::kData1Offset;
+ minimum_size = DataHandler::kSizeWithData1;
+ break;
+ case 2:
+ offset = DataHandler::kData2Offset;
+ minimum_size = DataHandler::kSizeWithData2;
+ break;
+ case 3:
+ offset = DataHandler::kData3Offset;
+ minimum_size = DataHandler::kSizeWithData3;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ USE(minimum_size);
+ CSA_ASSERT(this, UintPtrGreaterThanOrEqual(
+ LoadMapInstanceSizeInWords(handler_map),
+ IntPtrConstant(minimum_size / kPointerSize)));
+ return LoadObjectField(handler, offset);
+}
+
Node* AccessorAssembler::TryMonomorphicCase(Node* slot, Node* vector,
Node* receiver_map,
Label* if_handler,
@@ -128,10 +163,11 @@ void AccessorAssembler::HandlePolymorphicCase(Node* receiver_map,
void AccessorAssembler::HandleLoadICHandlerCase(
const LoadICParameters* p, Node* handler, Label* miss,
- ExitPoint* exit_point, ElementSupport support_elements) {
+ ExitPoint* exit_point, ICMode ic_mode, OnNonExistent on_nonexistent,
+ ElementSupport support_elements) {
Comment("have_handler");
- VARIABLE(var_holder, MachineRepresentation::kTagged, p->receiver);
+ VARIABLE(var_holder, MachineRepresentation::kTagged, p->holder);
VARIABLE(var_smi_handler, MachineRepresentation::kTagged, handler);
Variable* vars[] = {&var_holder, &var_smi_handler};
@@ -146,14 +182,15 @@ void AccessorAssembler::HandleLoadICHandlerCase(
BIND(&if_smi_handler);
{
HandleLoadICSmiHandlerCase(p, var_holder.value(), var_smi_handler.value(),
- miss, exit_point, false, support_elements);
+ handler, miss, exit_point, on_nonexistent,
+ support_elements);
}
BIND(&try_proto_handler);
{
GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
- HandleLoadICProtoHandlerCase(p, handler, &var_holder, &var_smi_handler,
- &if_smi_handler, miss, exit_point, false);
+ HandleLoadICProtoHandler(p, handler, &var_holder, &var_smi_handler,
+ &if_smi_handler, miss, exit_point, ic_mode);
}
BIND(&call_handler);
@@ -220,8 +257,8 @@ Node* AccessorAssembler::LoadDescriptorValue(Node* map, Node* descriptor) {
}
void AccessorAssembler::HandleLoadICSmiHandlerCase(
- const LoadICParameters* p, Node* holder, Node* smi_handler, Label* miss,
- ExitPoint* exit_point, bool throw_reference_error_if_nonexistent,
+ const LoadICParameters* p, Node* holder, Node* smi_handler, Node* handler,
+ Label* miss, ExitPoint* exit_point, OnNonExistent on_nonexistent,
ElementSupport support_elements) {
VARIABLE(var_double_value, MachineRepresentation::kFloat64);
Label rebox_double(this, &var_double_value);
@@ -301,8 +338,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
Node* intptr_index = TryToIntptr(p->name, miss);
Node* length = LoadStringLengthAsWord(holder);
GotoIf(UintPtrGreaterThanOrEqual(intptr_index, length), &if_oob);
- Node* code = StringCharCodeAt(holder, intptr_index);
- Node* result = StringFromCharCode(code);
+ TNode<Int32T> code = StringCharCodeAt(holder, intptr_index);
+ TNode<String> result = StringFromCharCode(code);
Return(result);
BIND(&if_oob);
@@ -361,10 +398,11 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
BIND(&nonexistent);
// This is a handler for a load of a non-existent value.
- if (throw_reference_error_if_nonexistent) {
+ if (on_nonexistent == OnNonExistent::kThrowReferenceError) {
exit_point->ReturnCallRuntime(Runtime::kThrowReferenceError, p->context,
p->name);
} else {
+ DCHECK_EQ(OnNonExistent::kReturnUndefined, on_nonexistent);
exit_point->Return(UndefinedConstant());
}
@@ -424,11 +462,18 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
BIND(&api_getter);
{
Comment("api_getter");
- Node* context = LoadWeakCellValueUnchecked(
- LoadObjectField(holder, Tuple2::kValue1Offset));
- Node* call_handler_info = LoadWeakCellValueUnchecked(
- LoadObjectField(holder, Tuple2::kValue2Offset));
-
+ CSA_ASSERT(this, TaggedIsNotSmi(handler));
+ Node* call_handler_info = holder;
+
+ // Context is stored either in data2 or data3 field depending on whether
+ // the access check is enabled for this handler or not.
+ Node* context_cell = Select(
+ IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_word),
+ [=] { return LoadHandlerDataField(handler, 3); },
+ [=] { return LoadHandlerDataField(handler, 2); },
+ MachineRepresentation::kTagged);
+
+ Node* context = LoadWeakCellValueUnchecked(context_cell);
Node* foreign =
LoadObjectField(call_handler_info, CallHandlerInfo::kJsCallbackOffset);
Node* callback = LoadObjectField(foreign, Foreign::kForeignAddressOffset,
@@ -538,104 +583,165 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
exit_point->Return(AllocateHeapNumberWithValue(var_double_value.value()));
}
-void AccessorAssembler::HandleLoadICProtoHandlerCase(
- const LoadICParameters* p, Node* handler, Variable* var_holder,
- Variable* var_smi_handler, Label* if_smi_handler, Label* miss,
- ExitPoint* exit_point, bool throw_reference_error_if_nonexistent) {
- DCHECK_EQ(MachineRepresentation::kTagged, var_holder->rep());
- DCHECK_EQ(MachineRepresentation::kTagged, var_smi_handler->rep());
-
- // IC dispatchers rely on these assumptions to be held.
- STATIC_ASSERT(FixedArray::kLengthOffset == LoadHandler::kDataOffset);
- DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kSmiHandlerIndex),
- LoadHandler::kSmiHandlerOffset);
- DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kValidityCellIndex),
- LoadHandler::kValidityCellOffset);
-
- // Both FixedArray and Tuple3 handlers have validity cell at the same offset.
- Label validity_cell_check_done(this);
- Node* validity_cell =
- LoadObjectField(handler, LoadHandler::kValidityCellOffset);
- GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
- &validity_cell_check_done);
- Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
- GotoIf(WordNotEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)),
- miss);
- Goto(&validity_cell_check_done);
-
- BIND(&validity_cell_check_done);
- Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
- CSA_ASSERT(this, TaggedIsSmi(smi_handler));
- Node* handler_flags = SmiUntag(smi_handler);
-
- Label check_prototypes(this);
- GotoIfNot(IsSetWord<LoadHandler::LookupOnReceiverBits>(handler_flags),
- &check_prototypes);
+// Performs actions common to both load and store handlers:
+// 1. Checks prototype validity cell.
+// 2. If |on_code_handler| is provided, then it checks if the sub handler is
+//    a smi or code and, if it's a code, calls |on_code_handler| to
+//    generate code that handles Code handlers.
+//    If |on_code_handler| is not provided, then only smi sub handlers are
+//    expected.
+// 3. Does access check on receiver if ICHandler::DoAccessCheckOnReceiverBits
+//    bit is set in the smi handler.
+// 4. Does dictionary lookup on receiver if ICHandler::LookupOnReceiverBits bit
+//    is set in the smi handler. If |on_found_on_receiver| is provided then
+//    it is called to generate code that handles the "found on receiver" case,
+//    or the lookup just misses if |on_found_on_receiver| is not provided.
+// 5. Falls through in the case of a smi handler, which is returned from this
+//    function (tagged!).
+// TODO(ishell): Remove templatization once we move common bits from
+// Load/StoreHandler to the base class.
+template <typename ICHandler, typename ICParameters>
+Node* AccessorAssembler::HandleProtoHandler(
+ const ICParameters* p, Node* handler, const OnCodeHandler& on_code_handler,
+ const OnFoundOnReceiver& on_found_on_receiver, Label* miss,
+ ICMode ic_mode) {
+ //
+ // Check prototype validity cell.
+ //
{
- CSA_ASSERT(this, Word32BinaryNot(
- HasInstanceType(p->receiver, JS_GLOBAL_OBJECT_TYPE)));
- Node* properties = LoadSlowProperties(p->receiver);
- VARIABLE(var_name_index, MachineType::PointerRepresentation());
- Label found(this, &var_name_index);
- NameDictionaryLookup<NameDictionary>(properties, p->name, &found,
- &var_name_index, &check_prototypes);
- BIND(&found);
- {
- VARIABLE(var_details, MachineRepresentation::kWord32);
- VARIABLE(var_value, MachineRepresentation::kTagged);
- LoadPropertyFromNameDictionary(properties, var_name_index.value(),
- &var_details, &var_value);
- Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
- p->context, p->receiver, miss);
- exit_point->Return(value);
- }
+ Label done(this);
+ Node* validity_cell =
+ LoadObjectField(handler, ICHandler::kValidityCellOffset);
+ GotoIf(WordEqual(validity_cell, SmiConstant(0)), &done);
+ Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+ GotoIf(WordNotEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)),
+ miss);
+ Goto(&done);
+ BIND(&done);
}
- BIND(&check_prototypes);
- Node* maybe_holder_cell = LoadObjectField(handler, LoadHandler::kDataOffset);
- Label array_handler(this), tuple_handler(this);
- Branch(TaggedIsSmi(maybe_holder_cell), &array_handler, &tuple_handler);
-
- BIND(&tuple_handler);
+ //
+ // Check smi handler bits.
+ //
{
- Label load_from_cached_holder(this), done(this);
+ Node* smi_or_code_handler =
+ LoadObjectField(handler, ICHandler::kSmiHandlerOffset);
+ if (on_code_handler) {
+ Label if_smi_handler(this);
+ GotoIf(TaggedIsSmi(smi_or_code_handler), &if_smi_handler);
- Branch(IsNull(maybe_holder_cell), &done, &load_from_cached_holder);
+ CSA_ASSERT(this, IsCodeMap(LoadMap(smi_or_code_handler)));
+ on_code_handler(smi_or_code_handler);
- BIND(&load_from_cached_holder);
- {
- Label unwrap_cell(this), bind_holder(this);
- Branch(IsWeakCell(maybe_holder_cell), &unwrap_cell, &bind_holder);
+ BIND(&if_smi_handler);
+ } else {
+ CSA_ASSERT(this, TaggedIsSmi(smi_or_code_handler));
+ }
+ Node* handler_flags = SmiUntag(smi_or_code_handler);
+
+ // Lookup on receiver and access checks are not necessary for global ICs
+ // because in the former case the validity cell check guards modifications
+ // of the global object and the latter is not applicable to the global
+ // object.
+ int mask = ICHandler::LookupOnReceiverBits::kMask |
+ ICHandler::DoAccessCheckOnReceiverBits::kMask;
+ if (ic_mode == ICMode::kGlobalIC) {
+ CSA_ASSERT(this, IsClearWord(handler_flags, mask));
+ } else {
+ DCHECK_EQ(ICMode::kNonGlobalIC, ic_mode);
- BIND(&unwrap_cell);
- {
- // For regular holders, having passed the receiver map check and the
- // validity cell check implies that |holder| is alive. However, for
- // global object receivers, the |maybe_holder_cell| may be cleared.
- Node* holder = LoadWeakCellValue(maybe_holder_cell, miss);
+ Label done(this), if_do_access_check(this), if_lookup_on_receiver(this);
+ GotoIf(IsClearWord(handler_flags, mask), &done);
+ // Only one of the bits can be set at a time.
+ CSA_ASSERT(this,
+ WordNotEqual(WordAnd(handler_flags, IntPtrConstant(mask)),
+ IntPtrConstant(mask)));
+ Branch(IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_flags),
+ &if_do_access_check, &if_lookup_on_receiver);
- var_holder->Bind(holder);
- Goto(&done);
+ BIND(&if_do_access_check);
+ {
+ Node* data2 = LoadHandlerDataField(handler, 2);
+ Node* expected_native_context = LoadWeakCellValue(data2, miss);
+ EmitAccessCheck(expected_native_context, p->context, p->receiver, &done,
+ miss);
}
- BIND(&bind_holder);
+ // Dictionary lookup on receiver is not necessary for Load/StoreGlobalIC
+ // because prototype validity cell check already guards modifications of
+ // the global object.
+ BIND(&if_lookup_on_receiver);
{
- var_holder->Bind(maybe_holder_cell);
- Goto(&done);
+ DCHECK_EQ(ICMode::kNonGlobalIC, ic_mode);
+ CSA_ASSERT(this, Word32BinaryNot(HasInstanceType(
+ p->receiver, JS_GLOBAL_OBJECT_TYPE)));
+
+ Node* properties = LoadSlowProperties(p->receiver);
+ VARIABLE(var_name_index, MachineType::PointerRepresentation());
+ Label found(this, &var_name_index);
+ NameDictionaryLookup<NameDictionary>(properties, p->name, &found,
+ &var_name_index, &done);
+ BIND(&found);
+ {
+ if (on_found_on_receiver) {
+ on_found_on_receiver(properties, var_name_index.value());
+ } else {
+ Goto(miss);
+ }
+ }
}
+
+ BIND(&done);
}
+ return smi_or_code_handler;
+ }
+}
- BIND(&done);
- var_smi_handler->Bind(smi_handler);
- Goto(if_smi_handler);
+void AccessorAssembler::HandleLoadICProtoHandler(
+ const LoadICParameters* p, Node* handler, Variable* var_holder,
+ Variable* var_smi_handler, Label* if_smi_handler, Label* miss,
+ ExitPoint* exit_point, ICMode ic_mode) {
+ DCHECK_EQ(MachineRepresentation::kTagged, var_holder->rep());
+ DCHECK_EQ(MachineRepresentation::kTagged, var_smi_handler->rep());
+
+ Node* smi_handler = HandleProtoHandler<LoadHandler>(
+ p, handler,
+ // Code sub-handlers are not expected in LoadICs, so no |on_code_handler|.
+ nullptr,
+ // on_found_on_receiver
+ [=](Node* properties, Node* name_index) {
+ VARIABLE(var_details, MachineRepresentation::kWord32);
+ VARIABLE(var_value, MachineRepresentation::kTagged);
+ LoadPropertyFromNameDictionary(properties, name_index, &var_details,
+ &var_value);
+ Node* value =
+ CallGetterIfAccessor(var_value.value(), var_details.value(),
+ p->context, p->receiver, miss);
+ exit_point->Return(value);
+ },
+ miss, ic_mode);
+
+ Node* maybe_holder_cell = LoadHandlerDataField(handler, 1);
+
+ Label load_from_cached_holder(this), done(this);
+
+ Branch(IsNull(maybe_holder_cell), &done, &load_from_cached_holder);
+
+ BIND(&load_from_cached_holder);
+ {
+ // For regular holders, having passed the receiver map check and the
+ // validity cell check implies that |holder| is alive. However, for
+ // global object receivers, the |maybe_holder_cell| may be cleared.
+ Node* holder = LoadWeakCellValue(maybe_holder_cell, miss);
+
+ var_holder->Bind(holder);
+ Goto(&done);
}
- BIND(&array_handler);
+ BIND(&done);
{
- exit_point->ReturnCallStub(
- CodeFactory::LoadICProtoArray(isolate(),
- throw_reference_error_if_nonexistent),
- p->context, p->receiver, p->name, p->slot, p->vector, handler);
+ var_smi_handler->Bind(smi_handler);
+ Goto(if_smi_handler);
}
}
@@ -657,94 +763,6 @@ void AccessorAssembler::EmitAccessCheck(Node* expected_native_context,
Branch(WordEqual(expected_token, current_token), can_access, miss);
}
-Node* AccessorAssembler::EmitLoadICProtoArrayCheck(const LoadICParameters* p,
- Node* handler,
- Node* handler_length,
- Node* handler_flags,
- Label* miss) {
- VARIABLE(var_start_index, MachineType::PointerRepresentation(),
- IntPtrConstant(LoadHandler::kFirstPrototypeIndex));
-
- Label can_access(this);
- GotoIfNot(IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_flags),
- &can_access);
- {
- // Skip this entry of a handler.
- var_start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex + 1));
-
- int offset =
- FixedArray::OffsetOfElementAt(LoadHandler::kFirstPrototypeIndex);
- Node* expected_native_context =
- LoadWeakCellValue(LoadObjectField(handler, offset), miss);
-
- EmitAccessCheck(expected_native_context, p->context, p->receiver,
- &can_access, miss);
- }
- BIND(&can_access);
-
- BuildFastLoop(var_start_index.value(), handler_length,
- [=](Node* current) {
- Node* prototype_cell =
- LoadFixedArrayElement(handler, current);
- CheckPrototype(prototype_cell, p->name, miss);
- },
- 1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
-
- Node* maybe_holder_cell =
- LoadFixedArrayElement(handler, LoadHandler::kDataIndex);
-
- VARIABLE(var_holder, MachineRepresentation::kTagged, p->receiver);
- Label done(this);
- GotoIf(IsNull(maybe_holder_cell), &done);
-
- {
- Label unwrap_cell(this), bind_holder(this);
- Branch(IsWeakCell(maybe_holder_cell), &unwrap_cell, &bind_holder);
-
- BIND(&unwrap_cell);
- {
- // For regular holders, having passed the receiver map check and the
- // validity cell check implies that |holder| is alive. However, for
- // global object receivers, the |maybe_holder_cell| may be cleared.
- Node* holder = LoadWeakCellValue(maybe_holder_cell, miss);
-
- var_holder.Bind(holder);
- Goto(&done);
- }
-
- BIND(&bind_holder);
- {
- var_holder.Bind(maybe_holder_cell);
- Goto(&done);
- }
- }
-
- BIND(&done);
- return var_holder.value();
-}
-
-void AccessorAssembler::HandleLoadGlobalICHandlerCase(
- const LoadICParameters* pp, Node* handler, Label* miss,
- ExitPoint* exit_point, bool throw_reference_error_if_nonexistent) {
- LoadICParameters p = *pp;
- DCHECK_NULL(p.receiver);
- Node* native_context = LoadNativeContext(p.context);
- p.receiver = LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX);
-
- VARIABLE(var_holder, MachineRepresentation::kTagged,
- LoadContextElement(native_context, Context::EXTENSION_INDEX));
- VARIABLE(var_smi_handler, MachineRepresentation::kTagged);
- Label if_smi_handler(this);
-
- HandleLoadICProtoHandlerCase(&p, handler, &var_holder, &var_smi_handler,
- &if_smi_handler, miss, exit_point,
- throw_reference_error_if_nonexistent);
- BIND(&if_smi_handler);
- HandleLoadICSmiHandlerCase(
- &p, var_holder.value(), var_smi_handler.value(), miss, exit_point,
- throw_reference_error_if_nonexistent, kOnlyProperties);
-}
-
void AccessorAssembler::JumpIfDataProperty(Node* details, Label* writable,
Label* readonly) {
// Accessor properties never have the READ_ONLY attribute set.
@@ -768,7 +786,7 @@ void AccessorAssembler::HandleStoreICNativeDataProperty(
}
void AccessorAssembler::HandleStoreICHandlerCase(
- const StoreICParameters* p, Node* handler, Label* miss,
+ const StoreICParameters* p, Node* handler, Label* miss, ICMode ic_mode,
ElementSupport support_elements) {
Label if_smi_handler(this), if_nonsmi_handler(this);
Label if_proto_handler(this), if_element_handler(this), call_handler(this),
@@ -848,20 +866,12 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&if_nonsmi_handler);
{
Node* handler_map = LoadMap(handler);
- if (support_elements == kSupportElements) {
- GotoIf(IsTuple2Map(handler_map), &if_element_handler);
- }
GotoIf(IsWeakCellMap(handler_map), &store_global);
Branch(IsCodeMap(handler_map), &call_handler, &if_proto_handler);
}
- if (support_elements == kSupportElements) {
- BIND(&if_element_handler);
- HandleStoreICElementHandlerCase(p, handler, miss);
- }
-
BIND(&if_proto_handler);
- HandleStoreICProtoHandler(p, handler, miss, support_elements);
+ HandleStoreICProtoHandler(p, handler, miss, ic_mode, support_elements);
// |handler| is a heap object. Must be code, call it.
BIND(&call_handler);
@@ -881,22 +891,6 @@ void AccessorAssembler::HandleStoreICHandlerCase(
}
}
-void AccessorAssembler::HandleStoreICElementHandlerCase(
- const StoreICParameters* p, Node* handler, Label* miss) {
- Comment("HandleStoreICElementHandlerCase");
- Node* validity_cell = LoadObjectField(handler, Tuple2::kValue1Offset);
- Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
- GotoIf(WordNotEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)),
- miss);
-
- Node* code_handler = LoadObjectField(handler, Tuple2::kValue2Offset);
- CSA_ASSERT(this, IsCodeMap(LoadMap(code_handler)));
-
- StoreWithVectorDescriptor descriptor(isolate());
- TailCallStub(descriptor, code_handler, p->context, p->receiver, p->name,
- p->value, p->slot, p->vector);
-}
-
void AccessorAssembler::HandleStoreAccessor(const StoreICParameters* p,
Node* holder, Node* handler_word) {
Comment("accessor_store");
@@ -911,125 +905,75 @@ void AccessorAssembler::HandleStoreAccessor(const StoreICParameters* p,
}
void AccessorAssembler::HandleStoreICProtoHandler(
- const StoreICParameters* p, Node* handler, Label* miss,
+ const StoreICParameters* p, Node* handler, Label* miss, ICMode ic_mode,
ElementSupport support_elements) {
Comment("HandleStoreICProtoHandler");
- // IC dispatchers rely on these assumptions to be held.
- STATIC_ASSERT(FixedArray::kLengthOffset == StoreHandler::kDataOffset);
- DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kSmiHandlerIndex),
- StoreHandler::kSmiHandlerOffset);
- DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kValidityCellIndex),
- StoreHandler::kValidityCellOffset);
-
- // Both FixedArray and Tuple3 handlers have validity cell at the same offset.
- Label validity_cell_check_done(this);
- Node* validity_cell =
- LoadObjectField(handler, StoreHandler::kValidityCellOffset);
- GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
- &validity_cell_check_done);
- Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
- GotoIf(WordNotEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)),
- miss);
- Goto(&validity_cell_check_done);
-
- BIND(&validity_cell_check_done);
- Node* smi_or_code = LoadObjectField(handler, StoreHandler::kSmiHandlerOffset);
-
- Node* maybe_transition_cell =
- LoadObjectField(handler, StoreHandler::kDataOffset);
- Label array_handler(this), do_store(this);
-
- VARIABLE(var_transition_map_or_holder, MachineRepresentation::kTagged,
- maybe_transition_cell);
-
- Branch(TaggedIsSmi(maybe_transition_cell), &array_handler, &do_store);
-
- BIND(&array_handler);
- {
- VARIABLE(var_start_index, MachineType::PointerRepresentation(),
- IntPtrConstant(StoreHandler::kFirstPrototypeIndex));
-
- Comment("array_handler");
- Label can_access(this);
- // Only Tuple3 handlers are allowed to have code handlers.
- CSA_ASSERT(this, TaggedIsSmi(smi_or_code));
- GotoIfNot(
- IsSetSmi(smi_or_code, StoreHandler::DoAccessCheckOnReceiverBits::kMask),
- &can_access);
+ OnCodeHandler on_code_handler;
+ if (support_elements == kSupportElements) {
+ // Code sub-handlers are expected only in KeyedStoreICs.
+ on_code_handler = [=](Node* code_handler) {
+ // This is either element store or transitioning element store.
+ Label if_element_store(this), if_transitioning_element_store(this);
+ Branch(IsStoreHandler0Map(LoadMap(handler)), &if_element_store,
+ &if_transitioning_element_store);
+ BIND(&if_element_store);
+ {
+ StoreWithVectorDescriptor descriptor(isolate());
+ TailCallStub(descriptor, code_handler, p->context, p->receiver, p->name,
+ p->value, p->slot, p->vector);
+ }
- {
- // Skip this entry of a handler.
- var_start_index.Bind(
- IntPtrConstant(StoreHandler::kFirstPrototypeIndex + 1));
+ BIND(&if_transitioning_element_store);
+ {
+ Node* transition_map_cell = LoadHandlerDataField(handler, 1);
+ Node* transition_map = LoadWeakCellValue(transition_map_cell, miss);
+ CSA_ASSERT(this, IsMap(transition_map));
- int offset =
- FixedArray::OffsetOfElementAt(StoreHandler::kFirstPrototypeIndex);
- Node* expected_native_context =
- LoadWeakCellValue(LoadObjectField(handler, offset), miss);
+ GotoIf(IsDeprecatedMap(transition_map), miss);
- EmitAccessCheck(expected_native_context, p->context, p->receiver,
- &can_access, miss);
- }
- BIND(&can_access);
+ StoreTransitionDescriptor descriptor(isolate());
+ TailCallStub(descriptor, code_handler, p->context, p->receiver, p->name,
+ transition_map, p->value, p->slot, p->vector);
+ }
+ };
+ }
- Node* length = SmiUntag(maybe_transition_cell);
- BuildFastLoop(var_start_index.value(), length,
- [=](Node* current) {
- Node* prototype_cell =
- LoadFixedArrayElement(handler, current);
- CheckPrototype(prototype_cell, p->name, miss);
- },
- 1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ Node* smi_handler = HandleProtoHandler<StoreHandler>(
+ p, handler, on_code_handler,
+ // on_found_on_receiver
+ [=](Node* properties, Node* name_index) {
+ // TODO(ishell): combine with |found| case inside |if_store_normal|.
+ Node* details =
+ LoadDetailsByKeyIndex<NameDictionary>(properties, name_index);
+ // Check that the property is a writable data property (no accessor).
+ const int kTypeAndReadOnlyMask =
+ PropertyDetails::KindField::kMask |
+ PropertyDetails::kAttributesReadOnlyMask;
+ STATIC_ASSERT(kData == 0);
+ GotoIf(IsSetWord32(details, kTypeAndReadOnlyMask), miss);
- Node* maybe_transition_cell =
- LoadFixedArrayElement(handler, StoreHandler::kDataIndex);
- var_transition_map_or_holder.Bind(maybe_transition_cell);
- Goto(&do_store);
- }
+ StoreValueByKeyIndex<NameDictionary>(properties, name_index, p->value);
+ Return(p->value);
+ },
+ miss, ic_mode);
Label if_transition_map(this), if_holder_object(this);
- BIND(&do_store);
- {
- Node* maybe_transition_cell = var_transition_map_or_holder.value();
-
- Label unwrap_cell(this);
- Branch(IsWeakCell(maybe_transition_cell), &unwrap_cell, &if_holder_object);
-
- BIND(&unwrap_cell);
- {
- Node* maybe_transition = LoadWeakCellValue(maybe_transition_cell, miss);
- var_transition_map_or_holder.Bind(maybe_transition);
- Branch(IsMap(maybe_transition), &if_transition_map, &if_holder_object);
- }
- }
+ Node* maybe_transition_or_holder_cell = LoadHandlerDataField(handler, 1);
+ Node* maybe_transition_or_holder =
+ LoadWeakCellValue(maybe_transition_or_holder_cell, miss);
+ Branch(IsMap(maybe_transition_or_holder), &if_transition_map,
+ &if_holder_object);
BIND(&if_transition_map);
{
Label if_transition_to_constant(this), if_store_normal(this);
Node* holder = p->receiver;
- Node* transition_map = var_transition_map_or_holder.value();
+ Node* transition_map = maybe_transition_or_holder;
GotoIf(IsDeprecatedMap(transition_map), miss);
-
- if (support_elements == kSupportElements) {
- Label if_smi_handler(this);
-
- GotoIf(TaggedIsSmi(smi_or_code), &if_smi_handler);
- Node* code_handler = smi_or_code;
- CSA_ASSERT(this, IsCodeMap(LoadMap(code_handler)));
-
- StoreTransitionDescriptor descriptor(isolate());
- TailCallStub(descriptor, code_handler, p->context, p->receiver, p->name,
- transition_map, p->value, p->slot, p->vector);
-
- BIND(&if_smi_handler);
- }
-
- Node* smi_handler = smi_or_code;
- CSA_ASSERT(this, TaggedIsSmi(smi_handler));
Node* handler_word = SmiUntag(smi_handler);
Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
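
The on_found_on_receiver callback above rejects anything that is not a writable data property with a single masked test on the property details word. A minimal standalone sketch of that check; the bit positions are made up here and do not match the real PropertyDetails layout:

#include <cassert>
#include <cstdint>

constexpr uint32_t kKindMask = 1u << 0;                 // 0 = data, 1 = accessor (assumed)
constexpr uint32_t kAttributesReadOnlyMask = 1u << 2;   // assumed READ_ONLY bit
constexpr uint32_t kTypeAndReadOnlyMask = kKindMask | kAttributesReadOnlyMask;

bool IsWritableDataProperty(uint32_t details) {
  // kData is encoded as 0, so any set bit under the mask means "accessor" or
  // "read-only" and the store has to miss.
  return (details & kTypeAndReadOnlyMask) == 0;
}

int main() {
  assert(IsWritableDataProperty(0));                         // plain writable data
  assert(!IsWritableDataProperty(kAttributesReadOnlyMask));  // read-only data
  assert(!IsWritableDataProperty(kKindMask));                // accessor
  return 0;
}
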
@@ -1085,6 +1029,9 @@ void AccessorAssembler::HandleStoreICProtoHandler(
BIND(&not_found);
{
Label slow(this);
+ Node* receiver_map = LoadMap(p->receiver);
+ InvalidateValidityCellIfPrototype(receiver_map);
+
Add<NameDictionary>(properties, p->name, p->value, &slow);
Return(p->value);
@@ -1098,9 +1045,8 @@ void AccessorAssembler::HandleStoreICProtoHandler(
{
Label if_store_global_proxy(this), if_api_setter(this), if_accessor(this),
if_native_data_property(this);
- Node* holder = var_transition_map_or_holder.value();
+ Node* holder = maybe_transition_or_holder;
- Node* smi_handler = smi_or_code;
CSA_ASSERT(this, TaggedIsSmi(smi_handler));
Node* handler_word = SmiUntag(smi_handler);
@@ -1135,10 +1081,18 @@ void AccessorAssembler::HandleStoreICProtoHandler(
BIND(&if_api_setter);
{
Comment("api_setter");
- Node* context = LoadWeakCellValueUnchecked(
- LoadObjectField(holder, Tuple2::kValue1Offset));
- Node* call_handler_info = LoadWeakCellValueUnchecked(
- LoadObjectField(holder, Tuple2::kValue2Offset));
+ CSA_ASSERT(this, TaggedIsNotSmi(handler));
+ Node* call_handler_info = holder;
+
+ // Context is stored either in data2 or data3 field depending on whether
+ // the access check is enabled for this handler or not.
+ Node* context_cell = Select(
+ IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_word),
+ [=] { return LoadHandlerDataField(handler, 3); },
+ [=] { return LoadHandlerDataField(handler, 2); },
+ MachineRepresentation::kTagged);
+
+ Node* context = LoadWeakCellValueUnchecked(context_cell);
Node* foreign = LoadObjectField(call_handler_info,
CallHandlerInfo::kJsCallbackOffset);
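
The api_setter path above picks the context cell from data2 or data3 depending on whether the handler also carries a native context for an access check. A rough standalone sketch of that selection, with plain pointers standing in for the weak cells and CSA nodes (not the actual DataHandler type):

#include <cassert>

// data1 holds the CallHandlerInfo; the context cell lives in data2 unless an
// access check is required, in which case data2 holds the native context and
// the context cell moves to data3.
struct DataHandlerSketch {
  void* data1;
  void* data2;
  void* data3;
};

void* ContextCellOf(const DataHandlerSketch& handler, bool do_access_check) {
  return do_access_check ? handler.data3 : handler.data2;
}

int main() {
  int native_context = 0, context_cell = 0, call_handler_info = 0;
  DataHandlerSketch with_check{&call_handler_info, &native_context, &context_cell};
  DataHandlerSketch without_check{&call_handler_info, &context_cell, nullptr};
  assert(ContextCellOf(with_check, /*do_access_check=*/true) == &context_cell);
  assert(ContextCellOf(without_check, /*do_access_check=*/false) == &context_cell);
  return 0;
}
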
@@ -1746,35 +1700,6 @@ void AccessorAssembler::EmitElementLoad(
}
}
-void AccessorAssembler::CheckPrototype(Node* prototype_cell, Node* name,
- Label* miss) {
- Node* maybe_prototype = LoadWeakCellValue(prototype_cell, miss);
-
- Label done(this);
- Label if_property_cell(this), if_dictionary_object(this);
-
- // |maybe_prototype| is either a PropertyCell or a slow-mode prototype.
- Branch(IsPropertyCell(maybe_prototype), &if_property_cell,
- &if_dictionary_object);
-
- BIND(&if_dictionary_object);
- {
- CSA_ASSERT(this, IsDictionaryMap(LoadMap(maybe_prototype)));
- NameDictionaryNegativeLookup(maybe_prototype, name, miss);
- Goto(&done);
- }
-
- BIND(&if_property_cell);
- {
- // Ensure the property cell still contains the hole.
- Node* value = LoadObjectField(maybe_prototype, PropertyCell::kValueOffset);
- GotoIfNot(IsTheHole(value), miss);
- Goto(&done);
- }
-
- BIND(&done);
-}
-
void AccessorAssembler::NameDictionaryNegativeLookup(Node* object, Node* name,
Label* miss) {
CSA_ASSERT(this, IsDictionaryMap(LoadMap(object)));
@@ -1819,6 +1744,32 @@ void AccessorAssembler::BranchIfStrictMode(Node* vector, Node* slot,
if_strict);
}
+void AccessorAssembler::InvalidateValidityCellIfPrototype(Node* map,
+ Node* bitfield2) {
+ Label is_prototype(this), cont(this);
+ if (bitfield2 == nullptr) {
+ bitfield2 = LoadMapBitField2(map);
+ }
+
+ Branch(IsSetWord32(bitfield2, Map::IsPrototypeMapBit::kMask), &is_prototype,
+ &cont);
+
+ BIND(&is_prototype);
+ {
+ Node* maybe_prototype_info =
+ LoadObjectField(map, Map::kTransitionsOrPrototypeInfoOffset);
+ // If there's no prototype info then there's nothing to invalidate.
+ GotoIf(TaggedIsSmi(maybe_prototype_info), &cont);
+
+ Node* function = ExternalConstant(
+ ExternalReference::invalidate_prototype_chains_function(isolate()));
+ CallCFunction1(MachineType::AnyTagged(), MachineType::AnyTagged(), function,
+ map);
+ Goto(&cont);
+ }
+ BIND(&cont);
+}
+
void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map,
Node* instance_type, Node* index,
Label* slow) {
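
InvalidateValidityCellIfPrototype above only does work for maps that are marked as prototypes and that already have prototype info. A small standalone sketch of that control flow; the bit position, field names, and the MapSketch type are invented for illustration:

#include <cassert>

constexpr unsigned kIsPrototypeMapBit = 1u << 5;  // assumed bit in bit_field2

struct MapSketch {
  unsigned bit_field2;
  bool has_prototype_info;
};

int g_invalidations = 0;
void InvalidatePrototypeChains(MapSketch*) { ++g_invalidations; }

void InvalidateValidityCellIfPrototype(MapSketch* map) {
  if ((map->bit_field2 & kIsPrototypeMapBit) == 0) return;
  // No prototype info means there is nothing to invalidate.
  if (!map->has_prototype_info) return;
  InvalidatePrototypeChains(map);
}

int main() {
  MapSketch plain{0, false};
  MapSketch proto{kIsPrototypeMapBit, true};
  MapSketch proto_without_info{kIsPrototypeMapBit, false};
  InvalidateValidityCellIfPrototype(&plain);
  InvalidateValidityCellIfPrototype(&proto_without_info);
  InvalidateValidityCellIfPrototype(&proto);
  assert(g_invalidations == 1);
  return 0;
}
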
@@ -1902,7 +1853,8 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
// Check if the receiver has fast or slow properties.
Node* bitfield3 = LoadMapBitField3(receiver_map);
- GotoIf(IsSetWord32<Map::DictionaryMap>(bitfield3), &if_property_dictionary);
+ GotoIf(IsSetWord32<Map::IsDictionaryMapBit>(bitfield3),
+ &if_property_dictionary);
// Try looking up the property on the receiver; if unsuccessful, look
// for a handler in the stub cache.
@@ -2053,9 +2005,8 @@ Node* AccessorAssembler::StubCachePrimaryOffset(Node* name, Node* map) {
// risk of collision even if the heap is spread over an area larger than
// 4Gb (and not at all if it isn't).
Node* map32 = TruncateWordToWord32(BitcastTaggedToWord(map));
- Node* hash = Int32Add(hash_field, map32);
// Base the offset on a simple combination of name and map.
- hash = Word32Xor(hash, Int32Constant(StubCache::kPrimaryMagic));
+ Node* hash = Int32Add(hash_field, map32);
uint32_t mask = (StubCache::kPrimaryTableSize - 1)
<< StubCache::kCacheIndexShift;
return ChangeUint32ToWord(Word32And(hash, Int32Constant(mask)));
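
StubCachePrimaryOffset above now folds the name's hash field and the low map bits together and masks the sum down to a table offset (the kPrimaryMagic xor is dropped in this hunk). A standalone sketch of that arithmetic; the table size and index shift are assumed values, not the constants from stub-cache.h:

#include <cassert>
#include <cstdint>

constexpr uint32_t kPrimaryTableSize = 2048;  // assumed power of two
constexpr uint32_t kCacheIndexShift = 3;      // assumed entry-size shift

// Add the name's hash field to the low 32 bits of the map pointer and mask
// down to an entry-aligned offset into the primary table.
uint32_t PrimaryOffset(uint32_t name_hash_field, uint32_t map_low_bits) {
  uint32_t hash = name_hash_field + map_low_bits;
  uint32_t mask = (kPrimaryTableSize - 1) << kCacheIndexShift;
  return hash & mask;
}

int main() {
  uint32_t offset = PrimaryOffset(0x1234u, 0xdeadbeefu);
  assert(offset % (1u << kCacheIndexShift) == 0);            // entry aligned
  assert(offset < (kPrimaryTableSize << kCacheIndexShift));  // inside the table
  return 0;
}
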
@@ -2310,13 +2261,12 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
// if (!(has_prototype_slot() && !has_non_instance_prototype())) use generic
// property loading mechanism.
- int has_prototype_slot_mask = 1 << Map::kHasPrototypeSlot;
- int has_non_instance_prototype_mask = 1 << Map::kHasNonInstancePrototype;
GotoIfNot(
- Word32Equal(Word32And(LoadMapBitField(receiver_map),
- Int32Constant(has_prototype_slot_mask |
- has_non_instance_prototype_mask)),
- Int32Constant(has_prototype_slot_mask)),
+ Word32Equal(
+ Word32And(LoadMapBitField(receiver_map),
+ Int32Constant(Map::HasPrototypeSlotBit::kMask |
+ Map::HasNonInstancePrototypeBit::kMask)),
+ Int32Constant(Map::HasPrototypeSlotBit::kMask)),
&not_function_prototype);
Return(LoadJSFunctionPrototype(receiver, &miss));
BIND(&not_function_prototype);
@@ -2337,120 +2287,97 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
}
}
-void AccessorAssembler::LoadICProtoArray(
- const LoadICParameters* p, Node* handler,
- bool throw_reference_error_if_nonexistent) {
- Label miss(this);
- CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(handler)));
- CSA_ASSERT(this, IsFixedArrayMap(LoadMap(handler)));
+void AccessorAssembler::LoadGlobalIC(TNode<FeedbackVector> vector, Node* slot,
+ const LazyNode<Context>& lazy_context,
+ const LazyNode<Name>& lazy_name,
+ TypeofMode typeof_mode,
+ ExitPoint* exit_point,
+ ParameterMode slot_mode) {
+ Label try_handler(this, Label::kDeferred), miss(this, Label::kDeferred);
+ LoadGlobalIC_TryPropertyCellCase(vector, slot, lazy_context, exit_point,
+ &try_handler, &miss, slot_mode);
- ExitPoint direct_exit(this);
-
- Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
- Node* handler_flags = SmiUntag(smi_handler);
-
- Node* handler_length = LoadAndUntagFixedArrayBaseLength(handler);
-
- Node* holder = EmitLoadICProtoArrayCheck(p, handler, handler_length,
- handler_flags, &miss);
-
- HandleLoadICSmiHandlerCase(p, holder, smi_handler, &miss, &direct_exit,
- throw_reference_error_if_nonexistent,
- kOnlyProperties);
+ BIND(&try_handler);
+ LoadGlobalIC_TryHandlerCase(vector, slot, lazy_context, lazy_name,
+ typeof_mode, exit_point, &miss, slot_mode);
BIND(&miss);
{
- TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
- p->slot, p->vector);
+ Comment("LoadGlobalIC_MissCase");
+ TNode<Context> context = lazy_context();
+ TNode<Name> name = lazy_name();
+ exit_point->ReturnCallRuntime(Runtime::kLoadGlobalIC_Miss, context, name,
+ ParameterToTagged(slot, slot_mode), vector);
}
}
void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase(
- Node* vector, Node* slot, ExitPoint* exit_point, Label* try_handler,
- Label* miss, ParameterMode slot_mode) {
+ TNode<FeedbackVector> vector, Node* slot,
+ const LazyNode<Context>& lazy_context, ExitPoint* exit_point,
+ Label* try_handler, Label* miss, ParameterMode slot_mode) {
Comment("LoadGlobalIC_TryPropertyCellCase");
- Node* weak_cell = LoadFeedbackVectorSlot(vector, slot, 0, slot_mode);
- CSA_ASSERT(this, HasInstanceType(weak_cell, WEAK_CELL_TYPE));
-
- // Load value or try handler case if the {weak_cell} is cleared.
- Node* property_cell = LoadWeakCellValue(weak_cell, try_handler);
- CSA_ASSERT(this, IsPropertyCell(property_cell));
-
- Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
- GotoIf(WordEqual(value, TheHoleConstant()), miss);
- exit_point->Return(value);
-}
-
-void AccessorAssembler::LoadGlobalIC_TryHandlerCase(const LoadICParameters* pp,
- TypeofMode typeof_mode,
- ExitPoint* exit_point,
- Label* miss) {
- Comment("LoadGlobalIC_TryHandlerCase");
-
- Label call_handler(this), non_smi(this);
-
- Node* handler = LoadFeedbackVectorSlot(pp->vector, pp->slot, kPointerSize,
- SMI_PARAMETERS);
- GotoIf(WordEqual(handler, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
- miss);
-
- GotoIfNot(TaggedIsSmi(handler), &non_smi);
-
- bool throw_reference_error_if_nonexistent = typeof_mode == NOT_INSIDE_TYPEOF;
+ Label if_lexical_var(this), if_property_cell(this);
+ TNode<Object> maybe_weak_cell =
+ LoadFeedbackVectorSlot(vector, slot, 0, slot_mode);
+ Branch(TaggedIsSmi(maybe_weak_cell), &if_lexical_var, &if_property_cell);
+ BIND(&if_property_cell);
{
- LoadICParameters p = *pp;
- DCHECK_NULL(p.receiver);
- Node* native_context = LoadNativeContext(p.context);
- p.receiver =
- LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX);
- Node* holder = LoadContextElement(native_context, Context::EXTENSION_INDEX);
- HandleLoadICSmiHandlerCase(&p, holder, handler, miss, exit_point,
- throw_reference_error_if_nonexistent,
- kOnlyProperties);
- }
+ TNode<WeakCell> weak_cell = CAST(maybe_weak_cell);
- BIND(&non_smi);
- GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
-
- HandleLoadGlobalICHandlerCase(pp, handler, miss, exit_point,
- throw_reference_error_if_nonexistent);
+ // Load value or try handler case if the {weak_cell} is cleared.
+ TNode<PropertyCell> property_cell =
+ CAST(LoadWeakCellValue(weak_cell, try_handler));
+ TNode<Object> value =
+ LoadObjectField(property_cell, PropertyCell::kValueOffset);
+ GotoIf(WordEqual(value, TheHoleConstant()), miss);
+ exit_point->Return(value);
+ }
- BIND(&call_handler);
+ BIND(&if_lexical_var);
{
- LoadWithVectorDescriptor descriptor(isolate());
- Node* native_context = LoadNativeContext(pp->context);
- Node* receiver =
- LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX);
- exit_point->ReturnCallStub(descriptor, handler, pp->context, receiver,
- pp->name, pp->slot, pp->vector);
+ Comment("Load lexical variable");
+ TNode<IntPtrT> lexical_handler = SmiUntag(CAST(maybe_weak_cell));
+ TNode<IntPtrT> context_index =
+ Signed(DecodeWord<GlobalICNexus::ContextIndexBits>(lexical_handler));
+ TNode<IntPtrT> slot_index =
+ Signed(DecodeWord<GlobalICNexus::SlotIndexBits>(lexical_handler));
+ TNode<Context> context = lazy_context();
+ TNode<Context> script_context = LoadScriptContext(context, context_index);
+ TNode<Object> result = LoadContextElement(script_context, slot_index);
+ exit_point->Return(result);
}
}
-void AccessorAssembler::LoadGlobalIC_MissCase(const LoadICParameters* p,
- ExitPoint* exit_point) {
- Comment("LoadGlobalIC_MissCase");
+void AccessorAssembler::LoadGlobalIC_TryHandlerCase(
+ TNode<FeedbackVector> vector, Node* slot,
+ const LazyNode<Context>& lazy_context, const LazyNode<Name>& lazy_name,
+ TypeofMode typeof_mode, ExitPoint* exit_point, Label* miss,
+ ParameterMode slot_mode) {
+ Comment("LoadGlobalIC_TryHandlerCase");
- exit_point->ReturnCallRuntime(Runtime::kLoadGlobalIC_Miss, p->context,
- p->name, p->slot, p->vector);
-}
+ Label call_handler(this), non_smi(this);
-void AccessorAssembler::LoadGlobalIC(const LoadICParameters* p,
- TypeofMode typeof_mode) {
- // Must be kept in sync with Interpreter::BuildLoadGlobal.
+ Node* handler = LoadFeedbackVectorSlot(vector, slot, kPointerSize, slot_mode);
+ GotoIf(WordEqual(handler, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
+ miss);
- ExitPoint direct_exit(this);
+ OnNonExistent on_nonexistent = typeof_mode == NOT_INSIDE_TYPEOF
+ ? OnNonExistent::kThrowReferenceError
+ : OnNonExistent::kReturnUndefined;
- Label try_handler(this), miss(this);
- LoadGlobalIC_TryPropertyCellCase(p->vector, p->slot, &direct_exit,
- &try_handler, &miss);
+ TNode<Context> context = lazy_context();
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<JSGlobalProxy> receiver =
+ CAST(LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX));
+ Node* holder = LoadContextElement(native_context, Context::EXTENSION_INDEX);
- BIND(&try_handler);
- LoadGlobalIC_TryHandlerCase(p, typeof_mode, &direct_exit, &miss);
+ LoadICParameters p(context, receiver, lazy_name(),
+ ParameterToTagged(slot, slot_mode), vector, holder);
- BIND(&miss);
- LoadGlobalIC_MissCase(p, &direct_exit);
+ HandleLoadICHandlerCase(&p, handler, miss, exit_point, ICMode::kGlobalIC,
+ on_nonexistent);
}
void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
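
LoadGlobalIC above now takes the context and name as LazyNode thunks, so the fast property-cell path never has to materialize them; only the handler and miss paths evaluate the callbacks. A minimal sketch of that deferral pattern, with plain ints standing in for the CSA nodes:

#include <cassert>
#include <functional>

template <typename T>
using LazyNode = std::function<T()>;

struct Result { int value; bool used_lazy_name; };

Result LoadGlobalLike(bool fast_path_hits, int cached_value,
                      const LazyNode<int>& lazy_name) {
  if (fast_path_hits) {
    // Fast path: never touches the lazily supplied name.
    return {cached_value, false};
  }
  // Slow path: materialize the name only now.
  return {lazy_name(), true};
}

int main() {
  int evaluations = 0;
  LazyNode<int> lazy_name = [&]() { ++evaluations; return 42; };

  Result fast = LoadGlobalLike(true, 7, lazy_name);
  assert(fast.value == 7 && !fast.used_lazy_name && evaluations == 0);

  Result slow = LoadGlobalLike(false, 7, lazy_name);
  assert(slow.value == 42 && slow.used_lazy_name && evaluations == 1);
  return 0;
}
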
@@ -2472,7 +2399,8 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
BIND(&if_handler);
{
HandleLoadICHandlerCase(p, var_handler.value(), &miss, &direct_exit,
- kSupportElements);
+ ICMode::kNonGlobalIC,
+ OnNonExistent::kReturnUndefined, kSupportElements);
}
BIND(&try_polymorphic);
@@ -2642,7 +2570,8 @@ void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p) {
{
ExitPoint direct_exit(this);
HandleLoadICHandlerCase(p, var_handler.value(), &miss, &direct_exit,
- kOnlyProperties);
+ ICMode::kNonGlobalIC,
+ OnNonExistent::kReturnUndefined, kOnlyProperties);
}
BIND(&miss);
@@ -2669,7 +2598,8 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
BIND(&if_handler);
{
Comment("StoreIC_if_handler");
- HandleStoreICHandlerCase(p, var_handler.value(), &miss);
+ HandleStoreICHandlerCase(p, var_handler.value(), &miss,
+ ICMode::kNonGlobalIC);
}
BIND(&try_polymorphic);
@@ -2710,6 +2640,61 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
}
}
+void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
+ Label if_lexical_var(this), if_property_cell(this);
+ Node* maybe_weak_cell =
+ LoadFeedbackVectorSlot(pp->vector, pp->slot, 0, SMI_PARAMETERS);
+ Branch(TaggedIsSmi(maybe_weak_cell), &if_lexical_var, &if_property_cell);
+
+ BIND(&if_property_cell);
+ {
+ Label try_handler(this), miss(this, Label::kDeferred);
+ Node* property_cell = LoadWeakCellValue(maybe_weak_cell, &try_handler);
+
+ ExitPoint direct_exit(this);
+ StoreGlobalIC_PropertyCellCase(property_cell, pp->value, &direct_exit,
+ &miss);
+
+ BIND(&try_handler);
+ {
+ Comment("StoreGlobalIC_try_handler");
+ Node* handler = LoadFeedbackVectorSlot(pp->vector, pp->slot, kPointerSize,
+ SMI_PARAMETERS);
+
+ GotoIf(WordEqual(handler, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
+ &miss);
+
+ StoreICParameters p = *pp;
+ DCHECK_NULL(p.receiver);
+ Node* native_context = LoadNativeContext(p.context);
+ p.receiver =
+ LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX);
+
+ HandleStoreICHandlerCase(&p, handler, &miss, ICMode::kGlobalIC);
+ }
+
+ BIND(&miss);
+ {
+ TailCallRuntime(Runtime::kStoreGlobalIC_Miss, pp->context, pp->value,
+ pp->slot, pp->vector, pp->name);
+ }
+ }
+
+ BIND(&if_lexical_var);
+ {
+ Comment("Store lexical variable");
+ TNode<IntPtrT> lexical_handler = SmiUntag(maybe_weak_cell);
+ TNode<IntPtrT> context_index =
+ Signed(DecodeWord<GlobalICNexus::ContextIndexBits>(lexical_handler));
+ TNode<IntPtrT> slot_index =
+ Signed(DecodeWord<GlobalICNexus::SlotIndexBits>(lexical_handler));
+ TNode<Context> script_context =
+ LoadScriptContext(CAST(pp->context), context_index);
+ StoreContextElement(script_context, slot_index, CAST(pp->value));
+ Return(pp->value);
+ }
+}
+
void AccessorAssembler::StoreGlobalIC_PropertyCellCase(Node* property_cell,
Node* value,
ExitPoint* exit_point,
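
Both LoadGlobalIC and StoreGlobalIC above treat a Smi feedback entry as a packed lexical-variable handler, splitting it into a script-context index and a slot index. A standalone sketch of that packing; the field order and the 12-bit slot width are assumptions, not the real GlobalICNexus layout:

#include <cassert>
#include <cstdint>

constexpr int kSlotIndexBits = 12;  // assumed width
constexpr uint32_t kSlotIndexMask = (1u << kSlotIndexBits) - 1;

uint32_t EncodeLexicalHandler(uint32_t context_index, uint32_t slot_index) {
  assert(slot_index <= kSlotIndexMask);
  return (context_index << kSlotIndexBits) | slot_index;
}

uint32_t ContextIndexOf(uint32_t handler) { return handler >> kSlotIndexBits; }
uint32_t SlotIndexOf(uint32_t handler) { return handler & kSlotIndexMask; }

int main() {
  uint32_t handler = EncodeLexicalHandler(/*context_index=*/3, /*slot_index=*/17);
  assert(ContextIndexOf(handler) == 3);
  assert(SlotIndexOf(handler) == 17);
  return 0;
}
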
@@ -2724,6 +2709,11 @@ void AccessorAssembler::StoreGlobalIC_PropertyCellCase(Node* property_cell,
LoadObjectField(property_cell, PropertyCell::kValueOffset);
Node* details = LoadAndUntagToWord32ObjectField(property_cell,
PropertyCell::kDetailsOffset);
+ GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask), miss);
+ CSA_ASSERT(this,
+ Word32Equal(DecodeWord32<PropertyDetails::KindField>(details),
+ Int32Constant(kData)));
+
Node* type = DecodeWord32<PropertyDetails::PropertyCellTypeField>(details);
Label constant(this), store(this), not_smi(this);
@@ -2789,7 +2779,8 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
BIND(&if_handler);
{
Comment("KeyedStoreIC_if_handler");
- HandleStoreICHandlerCase(p, var_handler.value(), &miss, kSupportElements);
+ HandleStoreICHandlerCase(p, var_handler.value(), &miss,
+ ICMode::kNonGlobalIC, kSupportElements);
}
BIND(&try_polymorphic);
@@ -2904,21 +2895,6 @@ void AccessorAssembler::GenerateLoadICTrampoline() {
TailCallBuiltin(Builtins::kLoadIC, context, receiver, name, slot, vector);
}
-void AccessorAssembler::GenerateLoadICProtoArray(
- bool throw_reference_error_if_nonexistent) {
- typedef LoadICProtoArrayDescriptor Descriptor;
-
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* name = Parameter(Descriptor::kName);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* handler = Parameter(Descriptor::kHandler);
- Node* context = Parameter(Descriptor::kContext);
-
- LoadICParameters p(context, receiver, name, slot, vector);
- LoadICProtoArray(&p, handler, throw_reference_error_if_nonexistent);
-}
-
void AccessorAssembler::GenerateLoadField() {
typedef LoadFieldDescriptor Descriptor;
@@ -2951,8 +2927,12 @@ void AccessorAssembler::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
Node* vector = Parameter(Descriptor::kVector);
Node* context = Parameter(Descriptor::kContext);
- LoadICParameters p(context, nullptr, name, slot, vector);
- LoadGlobalIC(&p, typeof_mode);
+ ExitPoint direct_exit(this);
+ LoadGlobalIC(CAST(vector), slot,
+ // lazy_context
+ [=] { return CAST(context); },
+ // lazy_name
+ [=] { return CAST(name); }, typeof_mode, &direct_exit);
}
void AccessorAssembler::GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode) {
@@ -3020,6 +3000,33 @@ void AccessorAssembler::GenerateKeyedLoadIC_PolymorphicName() {
KeyedLoadICPolymorphicName(&p);
}
+void AccessorAssembler::GenerateStoreGlobalIC() {
+ typedef StoreGlobalWithVectorDescriptor Descriptor;
+
+ Node* name = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ StoreICParameters p(context, nullptr, name, value, slot, vector);
+ StoreGlobalIC(&p);
+}
+
+void AccessorAssembler::GenerateStoreGlobalICTrampoline() {
+ typedef StoreGlobalDescriptor Descriptor;
+
+ Node* name = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* context = Parameter(Descriptor::kContext);
+ Node* vector = LoadFeedbackVectorForStub();
+
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kStoreGlobalIC);
+ TailCallStub(callable, context, name, value, slot, vector);
+}
+
void AccessorAssembler::GenerateStoreIC() {
typedef StoreWithVectorDescriptor Descriptor;
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index b11ff738c1..46376dd6a8 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -34,8 +34,8 @@ class AccessorAssembler : public CodeStubAssembler {
void GenerateKeyedLoadIC_PolymorphicName();
void GenerateStoreIC();
void GenerateStoreICTrampoline();
-
- void GenerateLoadICProtoArray(bool throw_reference_error_if_nonexistent);
+ void GenerateStoreGlobalIC();
+ void GenerateStoreGlobalICTrampoline();
void GenerateLoadGlobalIC(TypeofMode typeof_mode);
void GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode);
@@ -56,32 +56,35 @@ class AccessorAssembler : public CodeStubAssembler {
struct LoadICParameters {
LoadICParameters(Node* context, Node* receiver, Node* name, Node* slot,
- Node* vector)
+ Node* vector, Node* holder = nullptr)
: context(context),
receiver(receiver),
name(name),
slot(slot),
- vector(vector) {}
+ vector(vector),
+ holder(holder ? holder : receiver) {}
Node* context;
Node* receiver;
Node* name;
Node* slot;
Node* vector;
+ Node* holder;
};
- void LoadGlobalIC_TryPropertyCellCase(
- Node* vector, Node* slot, ExitPoint* exit_point, Label* try_handler,
- Label* miss, ParameterMode slot_mode = SMI_PARAMETERS);
- void LoadGlobalIC_TryHandlerCase(const LoadICParameters* p,
- TypeofMode typeof_mode,
- ExitPoint* exit_point, Label* miss);
- void LoadGlobalIC_MissCase(const LoadICParameters* p, ExitPoint* exit_point);
+ void LoadGlobalIC(TNode<FeedbackVector> vector, Node* slot,
+ const LazyNode<Context>& lazy_context,
+ const LazyNode<Name>& lazy_name, TypeofMode typeof_mode,
+ ExitPoint* exit_point,
+ ParameterMode slot_mode = SMI_PARAMETERS);
// Specialized LoadIC for inlined bytecode handler, hand-tuned to omit frame
// construction on common paths.
void LoadIC_BytecodeHandler(const LoadICParameters* p, ExitPoint* exit_point);
+ // Loads dataX field from the DataHandler object.
+ Node* LoadHandlerDataField(Node* handler, int data_index);
+
protected:
struct StoreICParameters : public LoadICParameters {
StoreICParameters(Node* context, Node* receiver, Node* name, Node* value,
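
LoadICParameters above gains an optional holder that falls back to the receiver when the caller does not supply one (the GlobalIC path passes the global object explicitly). A tiny sketch of that defaulting, with int* standing in for Node*:

#include <cassert>

struct LoadICParametersSketch {
  LoadICParametersSketch(int* receiver, int* holder = nullptr)
      : receiver(receiver), holder(holder ? holder : receiver) {}
  int* receiver;
  int* holder;
};

int main() {
  int r = 0, h = 1;
  LoadICParametersSketch defaulted(&r);
  LoadICParametersSketch explicit_holder(&r, &h);
  assert(defaulted.holder == &r);        // holder defaults to the receiver
  assert(explicit_holder.holder == &h);  // explicit holder wins
  return 0;
}
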
@@ -91,14 +94,17 @@ class AccessorAssembler : public CodeStubAssembler {
Node* value;
};
+ enum class ICMode { kNonGlobalIC, kGlobalIC };
enum ElementSupport { kOnlyProperties, kSupportElements };
void HandleStoreICHandlerCase(
- const StoreICParameters* p, Node* handler, Label* miss,
+ const StoreICParameters* p, Node* handler, Label* miss, ICMode ic_mode,
ElementSupport support_elements = kOnlyProperties);
void JumpIfDataProperty(Node* details, Label* writable, Label* readonly);
void BranchIfStrictMode(Node* vector, Node* slot, Label* if_strict);
+ void InvalidateValidityCellIfPrototype(Node* map, Node* bitfield2 = nullptr);
+
private:
// Stub generation entry points.
@@ -112,13 +118,12 @@ class AccessorAssembler : public CodeStubAssembler {
Node* LoadDescriptorValue(Node* map, Node* descriptor);
void LoadIC_Uninitialized(const LoadICParameters* p);
- void LoadICProtoArray(const LoadICParameters* p, Node* handler,
- bool throw_reference_error_if_nonexistent);
- void LoadGlobalIC(const LoadICParameters* p, TypeofMode typeof_mode);
+
void KeyedLoadIC(const LoadICParameters* p);
void KeyedLoadICGeneric(const LoadICParameters* p);
void KeyedLoadICPolymorphicName(const LoadICParameters* p);
void StoreIC(const StoreICParameters* p);
+ void StoreGlobalIC(const StoreICParameters* p);
void StoreGlobalIC_PropertyCellCase(Node* property_cell, Node* value,
ExitPoint* exit_point, Label* miss);
void KeyedStoreIC(const StoreICParameters* p);
@@ -134,23 +139,23 @@ class AccessorAssembler : public CodeStubAssembler {
Label* if_miss, int min_feedback_capacity);
// LoadIC implementation.
-
+ enum class OnNonExistent { kThrowReferenceError, kReturnUndefined };
void HandleLoadICHandlerCase(
const LoadICParameters* p, Node* handler, Label* miss,
- ExitPoint* exit_point, ElementSupport support_elements = kOnlyProperties);
+ ExitPoint* exit_point, ICMode ic_mode = ICMode::kNonGlobalIC,
+ OnNonExistent on_nonexistent = OnNonExistent::kReturnUndefined,
+ ElementSupport support_elements = kOnlyProperties);
void HandleLoadICSmiHandlerCase(const LoadICParameters* p, Node* holder,
- Node* smi_handler, Label* miss,
+ Node* smi_handler, Node* handler, Label* miss,
ExitPoint* exit_point,
- bool throw_reference_error_if_nonexistent,
+ OnNonExistent on_nonexistent,
ElementSupport support_elements);
- void HandleLoadICProtoHandlerCase(const LoadICParameters* p, Node* handler,
- Variable* var_holder,
- Variable* var_smi_handler,
- Label* if_smi_handler, Label* miss,
- ExitPoint* exit_point,
- bool throw_reference_error_if_nonexistent);
+ void HandleLoadICProtoHandler(const LoadICParameters* p, Node* handler,
+ Variable* var_holder, Variable* var_smi_handler,
+ Label* if_smi_handler, Label* miss,
+ ExitPoint* exit_point, ICMode ic_mode);
void HandleLoadField(Node* holder, Node* handler_word,
Variable* var_double_value, Label* rebox_double,
@@ -159,23 +164,26 @@ class AccessorAssembler : public CodeStubAssembler {
void EmitAccessCheck(Node* expected_native_context, Node* context,
Node* receiver, Label* can_access, Label* miss);
- Node* EmitLoadICProtoArrayCheck(const LoadICParameters* p, Node* handler,
- Node* handler_length, Node* handler_flags,
- Label* miss);
-
// LoadGlobalIC implementation.
- void HandleLoadGlobalICHandlerCase(const LoadICParameters* p, Node* handler,
- Label* miss, ExitPoint* exit_point,
- bool throw_reference_error_if_nonexistent);
+ void LoadGlobalIC_TryPropertyCellCase(
+ TNode<FeedbackVector> vector, Node* slot,
+ const LazyNode<Context>& lazy_context, ExitPoint* exit_point,
+ Label* try_handler, Label* miss,
+ ParameterMode slot_mode = SMI_PARAMETERS);
+
+ void LoadGlobalIC_TryHandlerCase(TNode<FeedbackVector> vector, Node* slot,
+ const LazyNode<Context>& lazy_context,
+ const LazyNode<Name>& lazy_name,
+ TypeofMode typeof_mode,
+ ExitPoint* exit_point, Label* miss,
+ ParameterMode slot_mode);
// StoreIC implementation.
- void HandleStoreICElementHandlerCase(const StoreICParameters* p,
- Node* handler, Label* miss);
-
void HandleStoreICProtoHandler(const StoreICParameters* p, Node* handler,
- Label* miss, ElementSupport support_elements);
+ Label* miss, ICMode ic_mode,
+ ElementSupport support_elements);
// If |transition| is nullptr then the normal field store is generated or
// transitioning store otherwise.
void HandleStoreICSmiHandlerCase(Node* handler_word, Node* holder,
@@ -208,6 +216,16 @@ class AccessorAssembler : public CodeStubAssembler {
// Low-level helpers.
+ typedef std::function<void(Node* code_handler)> OnCodeHandler;
+ typedef std::function<void(Node* properties, Node* name_index)>
+ OnFoundOnReceiver;
+
+ template <typename ICHandler, typename ICParameters>
+ Node* HandleProtoHandler(const ICParameters* p, Node* handler,
+ const OnCodeHandler& on_code_handler,
+ const OnFoundOnReceiver& on_found_on_receiver,
+ Label* miss, ICMode ic_mode);
+
Node* GetLanguageMode(Node* vector, Node* slot);
Node* PrepareValueForStore(Node* handler_word, Node* holder,
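
HandleProtoHandler declared above funnels the load and store prototype-handler paths through a pair of optional std::function callbacks; only keyed stores supply an on_code_handler. A simplified standalone sketch of that dispatch shape, with ints replacing the CSA nodes:

#include <cassert>
#include <functional>

using OnCodeHandler = std::function<void(int code_handler)>;
using OnFoundOnReceiver = std::function<void(int name_index)>;

int HandleProtoHandlerSketch(bool found_on_receiver, bool has_code_handler,
                             const OnCodeHandler& on_code_handler,
                             const OnFoundOnReceiver& on_found_on_receiver) {
  if (has_code_handler && on_code_handler) {
    on_code_handler(/*code_handler=*/1);  // e.g. keyed element store
    return 1;
  }
  if (found_on_receiver) {
    on_found_on_receiver(/*name_index=*/0);  // dictionary-mode receiver hit
    return 2;
  }
  return 3;  // fall through to the Smi-handler path
}

int main() {
  bool code_called = false, found_called = false;
  int path = HandleProtoHandlerSketch(
      false, true, [&](int) { code_called = true; },
      [&](int) { found_called = true; });
  assert(path == 1 && code_called && !found_called);

  path = HandleProtoHandlerSketch(true, false, OnCodeHandler(),
                                  [&](int) { found_called = true; });
  assert(path == 2 && found_called);
  return 0;
}
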
@@ -229,7 +247,6 @@ class AccessorAssembler : public CodeStubAssembler {
Label* rebox_double, Variable* var_double_value,
Label* unimplemented_elements_kind, Label* out_of_bounds,
Label* miss, ExitPoint* exit_point);
- void CheckPrototype(Node* prototype_cell, Node* name, Label* miss);
void NameDictionaryNegativeLookup(Node* object, Node* name, Label* miss);
// Stub cache access helpers.
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index 5c8e0511cf..cf2577a01f 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -9,10 +9,17 @@
#include "src/field-index-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/data-handler-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
namespace v8 {
namespace internal {
+TYPE_CHECKER(LoadHandler, LOAD_HANDLER_TYPE)
+CAST_ACCESSOR(LoadHandler)
+
// Decodes kind from Smi-handler.
LoadHandler::Kind LoadHandler::GetHandlerKind(Smi* smi_handler) {
return KindBits::decode(smi_handler->value());
@@ -76,28 +83,6 @@ Handle<Smi> LoadHandler::LoadModuleExport(Isolate* isolate, int index) {
return handle(Smi::FromInt(config), isolate);
}
-Handle<Smi> LoadHandler::EnableAccessCheckOnReceiver(Isolate* isolate,
- Handle<Smi> smi_handler) {
- int config = smi_handler->value();
-#ifdef DEBUG
- Kind kind = KindBits::decode(config);
- DCHECK_NE(kElement, kind);
-#endif
- config = DoAccessCheckOnReceiverBits::update(config, true);
- return handle(Smi::FromInt(config), isolate);
-}
-
-Handle<Smi> LoadHandler::EnableLookupOnReceiver(Isolate* isolate,
- Handle<Smi> smi_handler) {
- int config = smi_handler->value();
-#ifdef DEBUG
- Kind kind = KindBits::decode(config);
- DCHECK_NE(kElement, kind);
-#endif
- config = LookupOnReceiverBits::update(config, true);
- return handle(Smi::FromInt(config), isolate);
-}
-
Handle<Smi> LoadHandler::LoadNonExistent(Isolate* isolate) {
int config = KindBits::encode(kNonExistent);
return handle(Smi::FromInt(config), isolate);
@@ -125,6 +110,9 @@ Handle<Smi> LoadHandler::LoadIndexedString(Isolate* isolate,
return handle(Smi::FromInt(config), isolate);
}
+TYPE_CHECKER(StoreHandler, STORE_HANDLER_TYPE)
+CAST_ACCESSOR(StoreHandler)
+
Handle<Smi> StoreHandler::StoreGlobalProxy(Isolate* isolate) {
int config = KindBits::encode(kGlobalProxy);
return handle(Smi::FromInt(config), isolate);
@@ -140,13 +128,6 @@ Handle<Smi> StoreHandler::StoreProxy(Isolate* isolate) {
return handle(Smi::FromInt(config), isolate);
}
-Handle<Smi> StoreHandler::EnableAccessCheckOnReceiver(Isolate* isolate,
- Handle<Smi> smi_handler) {
- int config = smi_handler->value();
- config = DoAccessCheckOnReceiverBits::update(config, true);
- return handle(Smi::FromInt(config), isolate);
-}
-
Handle<Smi> StoreHandler::StoreField(Isolate* isolate, Kind kind,
int descriptor, FieldIndex field_index,
Representation representation,
@@ -230,25 +211,15 @@ Handle<Smi> StoreHandler::StoreApiSetter(Isolate* isolate,
// static
WeakCell* StoreHandler::GetTransitionCell(Object* handler) {
- if (handler->IsTuple3()) {
- STATIC_ASSERT(kDataOffset == Tuple3::kValue1Offset);
- WeakCell* cell = WeakCell::cast(Tuple3::cast(handler)->value1());
- DCHECK(!cell->cleared());
- return cell;
- }
-
- DCHECK(handler->IsFixedArrayExact());
- WeakCell* cell = WeakCell::cast(FixedArray::cast(handler)->get(kDataIndex));
+ DCHECK(handler->IsStoreHandler());
+ WeakCell* cell = WeakCell::cast(StoreHandler::cast(handler)->data1());
DCHECK(!cell->cleared());
return cell;
}
-// static
-bool StoreHandler::IsHandler(Object* maybe_handler) {
- return maybe_handler->IsFixedArrayExact() || maybe_handler->IsTuple3();
-}
-
} // namespace internal
} // namespace v8
+#include "src/objects/object-macros-undef.h"
+
#endif // V8_IC_HANDLER_CONFIGURATION_INL_H_
diff --git a/deps/v8/src/ic/handler-configuration.cc b/deps/v8/src/ic/handler-configuration.cc
index 077bdb49e1..19614a4322 100644
--- a/deps/v8/src/ic/handler-configuration.cc
+++ b/deps/v8/src/ic/handler-configuration.cc
@@ -13,76 +13,67 @@ namespace internal {
namespace {
-template <bool fill_array = true>
-int InitPrototypeChecks(Isolate* isolate, Handle<Map> receiver_map,
- Handle<JSReceiver> holder, Handle<Name> name,
- Handle<FixedArray> array, int first_index) {
- if (!holder.is_null() && holder->map() == *receiver_map) return 0;
+template <typename BitField>
+Handle<Smi> SetBitFieldValue(Isolate* isolate, Handle<Smi> smi_handler,
+ typename BitField::FieldType value) {
+ int config = smi_handler->value();
+ config = BitField::update(config, true);
+ return handle(Smi::FromInt(config), isolate);
+}
- HandleScope scope(isolate);
+// TODO(ishell): Remove templatization once we move common bits from

+// Load/StoreHandler to the base class.
+template <typename ICHandler, bool fill_handler = true>
+int InitPrototypeChecksImpl(Isolate* isolate, Handle<ICHandler> handler,
+ Handle<Smi>* smi_handler, Handle<Map> receiver_map,
+ Handle<JSReceiver> holder, Handle<Object> data1,
+ MaybeHandle<Object> maybe_data2) {
int checks_count = 0;
+ // Holder-is-receiver case itself does not add entries unless there is an
+ // optional data2 value provided.
- if (receiver_map->IsPrimitiveMap() || receiver_map->IsJSGlobalProxyMap()) {
+ if (receiver_map->IsPrimitiveMap() ||
+ receiver_map->is_access_check_needed()) {
+ DCHECK(!receiver_map->IsJSGlobalObjectMap());
// The validity cell check for primitive and global proxy receivers does
// not guarantee that certain native context ever had access to other
// native context. However, a handler created for one native context could
// be used in other native context through the megamorphic stub cache.
// So we record the original native context to which this handler
// corresponds.
- if (fill_array) {
+ if (fill_handler) {
Handle<Context> native_context = isolate->native_context();
- array->set(first_index + checks_count, native_context->self_weak_cell());
+ handler->set_data2(native_context->self_weak_cell());
+ } else {
+ // Enable access checks on receiver.
+ typedef typename ICHandler::DoAccessCheckOnReceiverBits Bit;
+ *smi_handler = SetBitFieldValue<Bit>(isolate, *smi_handler, true);
}
checks_count++;
-
- } else if (receiver_map->IsJSGlobalObjectMap()) {
- // If we are creating a handler for [Load/Store]GlobalIC then we need to
- // check that the property did not appear in the global object.
- if (fill_array) {
- Handle<JSGlobalObject> global = isolate->global_object();
- Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
- global, name, PropertyCellType::kInvalidated);
- DCHECK(cell->value()->IsTheHole(isolate));
- Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
- array->set(first_index + checks_count, *weak_cell);
+ } else if (receiver_map->is_dictionary_map() &&
+ !receiver_map->IsJSGlobalObjectMap()) {
+ if (!fill_handler) {
+ // Enable lookup on receiver.
+ typedef typename ICHandler::LookupOnReceiverBits Bit;
+ *smi_handler = SetBitFieldValue<Bit>(isolate, *smi_handler, true);
}
- checks_count++;
}
-
- // Create/count entries for each global or dictionary prototype appeared in
- // the prototype chain contains from receiver till holder.
- PrototypeIterator::WhereToEnd end = name->IsPrivate()
- ? PrototypeIterator::END_AT_NON_HIDDEN
- : PrototypeIterator::END_AT_NULL;
- for (PrototypeIterator iter(receiver_map, end); !iter.IsAtEnd();
- iter.Advance()) {
- Handle<JSReceiver> current =
- PrototypeIterator::GetCurrent<JSReceiver>(iter);
- if (holder.is_identical_to(current)) break;
- Handle<Map> current_map(current->map(), isolate);
-
- if (current_map->IsJSGlobalObjectMap()) {
- if (fill_array) {
- Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(current);
- Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
- global, name, PropertyCellType::kInvalidated);
- DCHECK(cell->value()->IsTheHole(isolate));
- Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
- array->set(first_index + checks_count, *weak_cell);
- }
- checks_count++;
-
- } else if (current_map->is_dictionary_map()) {
- DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (fill_array) {
- DCHECK_EQ(NameDictionary::kNotFound,
- current->property_dictionary()->FindEntry(name));
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate);
- array->set(first_index + checks_count, *weak_cell);
+ if (fill_handler) {
+ handler->set_data1(*data1);
+ }
+ Handle<Object> data2;
+ if (maybe_data2.ToHandle(&data2)) {
+ if (fill_handler) {
+ // This value will go either to data2 or data3 slot depending on whether
+ // data2 slot is already occupied by native context.
+ if (checks_count == 0) {
+ handler->set_data2(*data2);
+ } else {
+ DCHECK_EQ(1, checks_count);
+ handler->set_data3(*data2);
}
- checks_count++;
}
+ checks_count++;
}
return checks_count;
}
@@ -93,10 +84,24 @@ int InitPrototypeChecks(Isolate* isolate, Handle<Map> receiver_map,
// checked.
// Returns -1 if the handler has to be compiled or the number of prototype
// checks otherwise.
-int GetPrototypeCheckCount(Isolate* isolate, Handle<Map> receiver_map,
- Handle<JSReceiver> holder, Handle<Name> name) {
- return InitPrototypeChecks<false>(isolate, receiver_map, holder, name,
- Handle<FixedArray>(), 0);
+template <typename ICHandler>
+int GetPrototypeCheckCount(
+ Isolate* isolate, Handle<Smi>* smi_handler, Handle<Map> receiver_map,
+ Handle<JSReceiver> holder, Handle<Object> data1,
+ MaybeHandle<Object> maybe_data2 = MaybeHandle<Object>()) {
+ DCHECK_NOT_NULL(smi_handler);
+ return InitPrototypeChecksImpl<ICHandler, false>(isolate, Handle<ICHandler>(),
+ smi_handler, receiver_map,
+ holder, data1, maybe_data2);
+}
+
+template <typename ICHandler>
+void InitPrototypeChecks(
+ Isolate* isolate, Handle<ICHandler> handler, Handle<Map> receiver_map,
+ Handle<JSReceiver> holder, Handle<Object> data1,
+ MaybeHandle<Object> maybe_data2 = MaybeHandle<Object>()) {
+ InitPrototypeChecksImpl<ICHandler, true>(
+ isolate, handler, nullptr, receiver_map, holder, data1, maybe_data2);
}
} // namespace
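
SetBitFieldValue above is a thin wrapper over BitField::update applied to the Smi payload of a handler. A minimal standalone BitField sketch showing what that update does; the shift and width chosen here are illustrative, not the real LoadHandler/StoreHandler layout:

#include <cassert>
#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static constexpr uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  // Rewrites only this field's bits, leaving the rest of the payload intact.
  static constexpr uint32_t update(uint32_t previous, T value) {
    return (previous & ~kMask) | encode(value);
  }
  static constexpr T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

using DoAccessCheckOnReceiverBits = BitField<bool, /*shift=*/4, /*size=*/1>;

int main() {
  uint32_t config = 0b1011;  // some pre-existing handler bits
  config = DoAccessCheckOnReceiverBits::update(config, true);
  assert(DoAccessCheckOnReceiverBits::decode(config));
  assert((config & 0b1111) == 0b1011);  // unrelated bits untouched
  return 0;
}
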
@@ -105,65 +110,46 @@ int GetPrototypeCheckCount(Isolate* isolate, Handle<Map> receiver_map,
Handle<Object> LoadHandler::LoadFromPrototype(Isolate* isolate,
Handle<Map> receiver_map,
Handle<JSReceiver> holder,
- Handle<Name> name,
Handle<Smi> smi_handler,
- MaybeHandle<Object> maybe_data) {
- int checks_count =
- GetPrototypeCheckCount(isolate, receiver_map, holder, name);
- DCHECK_LE(0, checks_count);
-
- if (receiver_map->IsPrimitiveMap() ||
- receiver_map->is_access_check_needed()) {
- DCHECK(!receiver_map->is_dictionary_map());
- DCHECK_LE(1, checks_count); // For native context.
- smi_handler = EnableAccessCheckOnReceiver(isolate, smi_handler);
- } else if (receiver_map->is_dictionary_map() &&
- !receiver_map->IsJSGlobalObjectMap()) {
- smi_handler = EnableLookupOnReceiver(isolate, smi_handler);
+ MaybeHandle<Object> maybe_data1,
+ MaybeHandle<Object> maybe_data2) {
+ Handle<Object> data1;
+ if (!maybe_data1.ToHandle(&data1)) {
+ data1 = Map::GetOrCreatePrototypeWeakCell(holder, isolate);
}
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
- DCHECK(!validity_cell.is_null());
+ int checks_count = GetPrototypeCheckCount<LoadHandler>(
+ isolate, &smi_handler, receiver_map, holder, data1, maybe_data2);
- Handle<Object> data;
- if (!maybe_data.ToHandle(&data)) {
- data = Map::GetOrCreatePrototypeWeakCell(holder, isolate);
+ Handle<Object> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
+ if (validity_cell.is_null()) {
+ // Although in case of kApiGetter we load from receiver we still have to
+ // use the "prototype" shape of a handler in order to provide additional
+ // data to the dispatcher.
+ DCHECK_EQ(kApiGetter, GetHandlerKind(*smi_handler));
+ validity_cell = handle(Smi::kZero, isolate);
}
- if (checks_count == 0) {
- return isolate->factory()->NewTuple3(data, smi_handler, validity_cell,
- TENURED);
- }
- Handle<FixedArray> handler_array(isolate->factory()->NewFixedArray(
- kFirstPrototypeIndex + checks_count, TENURED));
- handler_array->set(kSmiHandlerIndex, *smi_handler);
- handler_array->set(kValidityCellIndex, *validity_cell);
- handler_array->set(kDataIndex, *data);
- InitPrototypeChecks(isolate, receiver_map, holder, name, handler_array,
- kFirstPrototypeIndex);
- return handler_array;
+ int data_count = 1 + checks_count;
+ Handle<LoadHandler> handler = isolate->factory()->NewLoadHandler(data_count);
+
+ handler->set_smi_handler(*smi_handler);
+ handler->set_validity_cell(*validity_cell);
+ InitPrototypeChecks(isolate, handler, receiver_map, holder, data1,
+ maybe_data2);
+ return handler;
}
// static
Handle<Object> LoadHandler::LoadFullChain(Isolate* isolate,
Handle<Map> receiver_map,
Handle<Object> holder,
- Handle<Name> name,
Handle<Smi> smi_handler) {
- Handle<JSReceiver> end; // null handle
- int checks_count = GetPrototypeCheckCount(isolate, receiver_map, end, name);
- DCHECK_LE(0, checks_count);
-
- if (receiver_map->IsPrimitiveMap() ||
- receiver_map->is_access_check_needed()) {
- DCHECK(!receiver_map->is_dictionary_map());
- DCHECK_LE(1, checks_count); // For native context.
- smi_handler = EnableAccessCheckOnReceiver(isolate, smi_handler);
- } else if (receiver_map->is_dictionary_map() &&
- !receiver_map->IsJSGlobalObjectMap()) {
- smi_handler = EnableLookupOnReceiver(isolate, smi_handler);
- }
+ Handle<JSReceiver> end; // null handle, means full prototype chain lookup.
+ Handle<Object> data1 = holder;
+ int checks_count = GetPrototypeCheckCount<LoadHandler>(
+ isolate, &smi_handler, receiver_map, end, data1);
Handle<Object> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
@@ -174,18 +160,13 @@ Handle<Object> LoadHandler::LoadFullChain(Isolate* isolate,
validity_cell = handle(Smi::kZero, isolate);
}
- Factory* factory = isolate->factory();
- if (checks_count == 0) {
- return factory->NewTuple3(holder, smi_handler, validity_cell, TENURED);
- }
- Handle<FixedArray> handler_array(factory->NewFixedArray(
- LoadHandler::kFirstPrototypeIndex + checks_count, TENURED));
- handler_array->set(kSmiHandlerIndex, *smi_handler);
- handler_array->set(kValidityCellIndex, *validity_cell);
- handler_array->set(kDataIndex, *holder);
- InitPrototypeChecks(isolate, receiver_map, end, name, handler_array,
- kFirstPrototypeIndex);
- return handler_array;
+ int data_count = 1 + checks_count;
+ Handle<LoadHandler> handler = isolate->factory()->NewLoadHandler(data_count);
+
+ handler->set_smi_handler(*smi_handler);
+ handler->set_validity_cell(*validity_cell);
+ InitPrototypeChecks(isolate, handler, receiver_map, end, data1);
+ return handler;
}
// static
@@ -218,7 +199,11 @@ Handle<Object> StoreHandler::StoreElementTransition(
validity_cell = handle(Smi::kZero, isolate);
}
Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- return isolate->factory()->NewTuple3(cell, stub, validity_cell, TENURED);
+ Handle<StoreHandler> handler = isolate->factory()->NewStoreHandler(1);
+ handler->set_smi_handler(*stub);
+ handler->set_validity_cell(*validity_cell);
+ handler->set_data1(*cell);
+ return handler;
}
Handle<Smi> StoreHandler::StoreTransition(Isolate* isolate,
@@ -248,19 +233,16 @@ Handle<Smi> StoreHandler::StoreTransition(Isolate* isolate,
// static
Handle<Object> StoreHandler::StoreThroughPrototype(
Isolate* isolate, Handle<Map> receiver_map, Handle<JSReceiver> holder,
- Handle<Name> name, Handle<Smi> smi_handler,
- MaybeHandle<Object> maybe_data) {
- int checks_count =
- GetPrototypeCheckCount(isolate, receiver_map, holder, name);
-
- DCHECK_LE(0, checks_count);
-
- if (receiver_map->is_access_check_needed()) {
- DCHECK(!receiver_map->is_dictionary_map());
- DCHECK_LE(1, checks_count); // For native context.
- smi_handler = EnableAccessCheckOnReceiver(isolate, smi_handler);
+ Handle<Smi> smi_handler, MaybeHandle<Object> maybe_data1,
+ MaybeHandle<Object> maybe_data2) {
+ Handle<Object> data1;
+ if (!maybe_data1.ToHandle(&data1)) {
+ data1 = Map::GetOrCreatePrototypeWeakCell(holder, isolate);
}
+ int checks_count = GetPrototypeCheckCount<StoreHandler>(
+ isolate, &smi_handler, receiver_map, holder, data1, maybe_data2);
+
Handle<Object> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
if (validity_cell.is_null()) {
@@ -268,23 +250,15 @@ Handle<Object> StoreHandler::StoreThroughPrototype(
validity_cell = handle(Smi::kZero, isolate);
}
- Handle<Object> data;
- if (!maybe_data.ToHandle(&data)) {
- data = Map::GetOrCreatePrototypeWeakCell(holder, isolate);
- }
+ int data_count = 1 + checks_count;
+ Handle<StoreHandler> handler =
+ isolate->factory()->NewStoreHandler(data_count);
- Factory* factory = isolate->factory();
- if (checks_count == 0) {
- return factory->NewTuple3(data, smi_handler, validity_cell, TENURED);
- }
- Handle<FixedArray> handler_array(
- factory->NewFixedArray(kFirstPrototypeIndex + checks_count, TENURED));
- handler_array->set(kSmiHandlerIndex, *smi_handler);
- handler_array->set(kValidityCellIndex, *validity_cell);
- handler_array->set(kDataIndex, *data);
- InitPrototypeChecks(isolate, receiver_map, holder, name, handler_array,
- kFirstPrototypeIndex);
- return handler_array;
+ handler->set_smi_handler(*smi_handler);
+ handler->set_validity_cell(*validity_cell);
+ InitPrototypeChecks(isolate, handler, receiver_map, holder, data1,
+ maybe_data2);
+ return handler;
}
// static
@@ -297,68 +271,35 @@ Handle<Object> StoreHandler::StoreGlobal(Isolate* isolate,
Handle<Object> StoreHandler::StoreProxy(Isolate* isolate,
Handle<Map> receiver_map,
Handle<JSProxy> proxy,
- Handle<JSReceiver> receiver,
- Handle<Name> name) {
+ Handle<JSReceiver> receiver) {
Handle<Smi> smi_handler = StoreProxy(isolate);
if (receiver.is_identical_to(proxy)) return smi_handler;
Handle<WeakCell> holder_cell = isolate->factory()->NewWeakCell(proxy);
- return StoreThroughPrototype(isolate, receiver_map, proxy, name, smi_handler,
+ return StoreThroughPrototype(isolate, receiver_map, proxy, smi_handler,
holder_cell);
}
Object* StoreHandler::ValidHandlerOrNull(Object* raw_handler, Name* name,
Handle<Map>* out_transition) {
- STATIC_ASSERT(kValidityCellOffset == Tuple3::kValue3Offset);
-
Smi* valid = Smi::FromInt(Map::kPrototypeChainValid);
- if (raw_handler->IsTuple3()) {
- // Check validity cell.
- Tuple3* handler = Tuple3::cast(raw_handler);
+ DCHECK(raw_handler->IsStoreHandler());
- Object* raw_validity_cell = handler->value3();
- // |raw_valitity_cell| can be Smi::kZero if no validity cell is required
- // (which counts as valid).
- if (raw_validity_cell->IsCell() &&
- Cell::cast(raw_validity_cell)->value() != valid) {
- return nullptr;
- }
+ // Check validity cell.
+ StoreHandler* handler = StoreHandler::cast(raw_handler);
- } else {
- DCHECK(raw_handler->IsFixedArrayExact());
- FixedArray* handler = FixedArray::cast(raw_handler);
-
- // Check validity cell.
- Object* value = Cell::cast(handler->get(kValidityCellIndex))->value();
- if (value != valid) return nullptr;
-
- // Check prototypes.
- Heap* heap = handler->GetHeap();
- Isolate* isolate = heap->isolate();
- Handle<Name> name_handle(name, isolate);
- for (int i = kFirstPrototypeIndex; i < handler->length(); i++) {
- // This mirrors AccessorAssembler::CheckPrototype.
- WeakCell* prototype_cell = WeakCell::cast(handler->get(i));
- if (prototype_cell->cleared()) return nullptr;
- HeapObject* maybe_prototype = HeapObject::cast(prototype_cell->value());
- if (maybe_prototype->IsPropertyCell()) {
- Object* value = PropertyCell::cast(maybe_prototype)->value();
- if (value != heap->the_hole_value()) return nullptr;
- } else {
- DCHECK(maybe_prototype->map()->is_dictionary_map());
- // Do a negative dictionary lookup.
- NameDictionary* dict =
- JSObject::cast(maybe_prototype)->property_dictionary();
- int number = dict->FindEntry(isolate, name_handle);
- if (number != NameDictionary::kNotFound) {
- PropertyDetails details = dict->DetailsAt(number);
- if (details.IsReadOnly()) return nullptr;
- if (details.kind() == PropertyKind::kAccessor) return nullptr;
- break;
- }
- }
- }
+ Object* raw_validity_cell = handler->validity_cell();
+  // |raw_validity_cell| can be Smi::kZero if no validity cell is required
+ // (which counts as valid).
+ if (raw_validity_cell->IsCell() &&
+ Cell::cast(raw_validity_cell)->value() != valid) {
+ return nullptr;
}
+ // We use this ValidHandlerOrNull() function only for transitioning store
+ // handlers which are not applicable to receivers that require access checks.
+ DCHECK(handler->smi_handler()->IsSmi());
+ DCHECK(
+ !DoAccessCheckOnReceiverBits::decode(Smi::ToInt(handler->smi_handler())));
// Check if the transition target is deprecated.
WeakCell* target_cell = GetTransitionCell(raw_handler);
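
ValidHandlerOrNull above first consults the handler's validity cell: Smi::kZero means no cell is required (always valid), otherwise the cell must still hold Map::kPrototypeChainValid or the handler is stale. A small sketch of that convention, with nullptr standing in for the Smi::kZero case and simplified types:

#include <cassert>

constexpr int kPrototypeChainValid = 0;
constexpr int kPrototypeChainInvalid = 1;

struct CellSketch { int value; };

bool HandlerStillValid(const CellSketch* validity_cell) {
  if (validity_cell == nullptr) return true;  // no validity cell required
  return validity_cell->value == kPrototypeChainValid;
}

int main() {
  CellSketch valid{kPrototypeChainValid};
  CellSketch invalidated{kPrototypeChainInvalid};
  assert(HandlerStillValid(nullptr));
  assert(HandlerStillValid(&valid));
  assert(!HandlerStillValid(&invalidated));
  return 0;
}
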
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
index 3d0990e826..514a5ed5fa 100644
--- a/deps/v8/src/ic/handler-configuration.h
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -9,14 +9,25 @@
#include "src/field-index.h"
#include "src/globals.h"
#include "src/objects.h"
+#include "src/objects/data-handler.h"
#include "src/utils.h"
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
namespace v8 {
namespace internal {
-// A set of bit fields representing Smi handlers for loads.
-class LoadHandler {
+// A set of bit fields representing Smi handlers for loads and a HeapObject
+// that represents load handlers that can't be encoded in a Smi.
+// TODO(ishell): move to load-handler.h
+class LoadHandler final : public DataHandler {
public:
+ DECL_CAST(LoadHandler)
+
+ DECL_PRINTER(LoadHandler)
+ DECL_VERIFIER(LoadHandler)
+
enum Kind {
kElement,
kIndexedString,
@@ -72,7 +83,8 @@ class LoadHandler {
//
// Encoding when KindBits contains kElement or kIndexedString.
//
- class AllowOutOfBoundsBits : public BitField<bool, KindBits::kNext, 1> {};
+ class AllowOutOfBoundsBits
+ : public BitField<bool, LookupOnReceiverBits::kNext, 1> {};
//
// Encoding when KindBits contains kElement.
@@ -88,23 +100,9 @@ class LoadHandler {
//
// Encoding when KindBits contains kModuleExport.
//
- class ExportsIndexBits : public BitField<unsigned, KindBits::kNext,
- kSmiValueSize - KindBits::kNext> {};
-
- // The layout of an Tuple3 handler representing a load of a field from
- // prototype when prototype chain checks do not include non-existing lookups
- // or access checks.
- static const int kDataOffset = Tuple3::kValue1Offset;
- static const int kSmiHandlerOffset = Tuple3::kValue2Offset;
- static const int kValidityCellOffset = Tuple3::kValue3Offset;
-
- // The layout of an array handler representing a load of a field from
- // prototype when prototype chain checks include non-existing lookups and
- // access checks.
- static const int kSmiHandlerIndex = 0;
- static const int kValidityCellIndex = 1;
- static const int kDataIndex = 2;
- static const int kFirstPrototypeIndex = 3;
+ class ExportsIndexBits
+ : public BitField<unsigned, LookupOnReceiverBits::kNext,
+ kSmiValueSize - LookupOnReceiverBits::kNext> {};
// Decodes kind from Smi-handler.
static inline Kind GetHandlerKind(Smi* smi_handler);
@@ -149,7 +147,7 @@ class LoadHandler {
// needed (e.g., for "nonexistent"), null_value() may be passed in.
static Handle<Object> LoadFullChain(Isolate* isolate,
Handle<Map> receiver_map,
- Handle<Object> holder, Handle<Name> name,
+ Handle<Object> holder,
Handle<Smi> smi_handler);
// Creates a data handler that represents a prototype chain check followed
@@ -157,8 +155,9 @@ class LoadHandler {
// Can be used only if GetPrototypeCheckCount() returns a non-negative value.
static Handle<Object> LoadFromPrototype(
Isolate* isolate, Handle<Map> receiver_map, Handle<JSReceiver> holder,
- Handle<Name> name, Handle<Smi> smi_handler,
- MaybeHandle<Object> maybe_data = MaybeHandle<Object>());
+ Handle<Smi> smi_handler,
+ MaybeHandle<Object> maybe_data1 = MaybeHandle<Object>(),
+ MaybeHandle<Object> maybe_data2 = MaybeHandle<Object>());
// Creates a Smi-handler for loading a non-existent property. Works only as
// a part of prototype chain check.
@@ -177,22 +176,18 @@ class LoadHandler {
// Decodes the KeyedAccessLoadMode from a {handler}.
static KeyedAccessLoadMode GetKeyedAccessLoadMode(Object* handler);
-
- private:
- // Sets DoAccessCheckOnReceiverBits in given Smi-handler. The receiver
- // check is a part of a prototype chain check.
- static inline Handle<Smi> EnableAccessCheckOnReceiver(
- Isolate* isolate, Handle<Smi> smi_handler);
-
- // Sets LookupOnReceiverBits in given Smi-handler. The receiver
- // check is a part of a prototype chain check.
- static inline Handle<Smi> EnableLookupOnReceiver(Isolate* isolate,
- Handle<Smi> smi_handler);
};
-// A set of bit fields representing Smi handlers for stores.
-class StoreHandler {
+// A set of bit fields representing Smi handlers for stores and a HeapObject
+// that represents store handlers that can't be encoded in a Smi.
+// TODO(ishell): move to store-handler.h
+class StoreHandler final : public DataHandler {
public:
+ DECL_CAST(StoreHandler)
+
+ DECL_PRINTER(StoreHandler)
+ DECL_VERIFIER(StoreHandler)
+
enum Kind {
kElement,
kField,
@@ -213,21 +208,24 @@ class StoreHandler {
enum FieldRepresentation { kSmi, kDouble, kHeapObject, kTagged };
- static inline bool IsHandler(Object* maybe_handler);
-
// Applicable to kGlobalProxy, kProxy kinds.
// Defines whether access rights check should be done on receiver object.
class DoAccessCheckOnReceiverBits
: public BitField<bool, KindBits::kNext, 1> {};
+ // Defines whether a lookup should be done on the receiver object before
+ // proceeding to the prototype chain. Applies only to named-property kinds
+ // when storing through the prototype chain; ignored when storing to the holder.
+ class LookupOnReceiverBits
+ : public BitField<bool, DoAccessCheckOnReceiverBits::kNext, 1> {};
+
// Applicable to kField, kTransitionToField and kTransitionToConstant
// kinds.
// Index of a value entry in the descriptor array.
- class DescriptorBits
- : public BitField<unsigned, DoAccessCheckOnReceiverBits::kNext,
- kDescriptorIndexBitCount> {};
+ class DescriptorBits : public BitField<unsigned, LookupOnReceiverBits::kNext,
+ kDescriptorIndexBitCount> {};
//
// Encoding when KindBits contains kTransitionToConstant.
//
@@ -249,24 +247,10 @@ class StoreHandler {
// Make sure we don't overflow the smi.
STATIC_ASSERT(FieldIndexBits::kNext <= kSmiValueSize);
- // The layout of an Tuple3 handler representing a transitioning store
- // when prototype chain checks do not include non-existing lookups or access
- // checks.
- static const int kDataOffset = Tuple3::kValue1Offset;
- static const int kSmiHandlerOffset = Tuple3::kValue2Offset;
- static const int kValidityCellOffset = Tuple3::kValue3Offset;
-
static inline WeakCell* GetTransitionCell(Object* handler);
static Object* ValidHandlerOrNull(Object* handler, Name* name,
Handle<Map>* out_transition);
- // The layout of an array handler representing a transitioning store
- // when prototype chain checks include non-existing lookups and access checks.
- static const int kSmiHandlerIndex = 0;
- static const int kValidityCellIndex = 1;
- static const int kDataIndex = 2;
- static const int kFirstPrototypeIndex = 3;
-
// Creates a Smi-handler for storing a field to fast object.
static inline Handle<Smi> StoreField(Isolate* isolate, int descriptor,
FieldIndex field_index,
@@ -289,8 +273,9 @@ class StoreHandler {
static Handle<Object> StoreThroughPrototype(
Isolate* isolate, Handle<Map> receiver_map, Handle<JSReceiver> holder,
- Handle<Name> name, Handle<Smi> smi_handler,
- MaybeHandle<Object> data = MaybeHandle<Object>());
+ Handle<Smi> smi_handler,
+ MaybeHandle<Object> maybe_data1 = MaybeHandle<Object>(),
+ MaybeHandle<Object> maybe_data2 = MaybeHandle<Object>());
static Handle<Object> StoreElementTransition(Isolate* isolate,
Handle<Map> receiver_map,
@@ -299,8 +284,7 @@ class StoreHandler {
static Handle<Object> StoreProxy(Isolate* isolate, Handle<Map> receiver_map,
Handle<JSProxy> proxy,
- Handle<JSReceiver> receiver,
- Handle<Name> name);
+ Handle<JSReceiver> receiver);
// Creates a handler for storing a property to the property cell of a global
// object.
@@ -317,11 +301,6 @@ class StoreHandler {
static inline Handle<Smi> StoreProxy(Isolate* isolate);
private:
- // Sets DoAccessCheckOnReceiverBits in given Smi-handler. The receiver
- // check is a part of a prototype chain check.
- static inline Handle<Smi> EnableAccessCheckOnReceiver(
- Isolate* isolate, Handle<Smi> smi_handler);
-
static inline Handle<Smi> StoreField(Isolate* isolate, Kind kind,
int descriptor, FieldIndex field_index,
Representation representation,
@@ -342,4 +321,6 @@ class StoreHandler {
} // namespace internal
} // namespace v8
+#include "src/objects/object-macros-undef.h"
+
#endif // V8_IC_HANDLER_CONFIGURATION_H_
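
The Smi-handler layouts above are built from chained BitFields: each field is based at the previous field's kNext, which is why AllowOutOfBoundsBits, ExportsIndexBits and DescriptorBits are re-based on LookupOnReceiverBits::kNext once that bit is inserted. A simplified sketch of the chaining follows; the field widths and the MiniBitField helper are assumptions for illustration, not V8's actual BitField template.

    #include <cassert>
    #include <cstdint>

    template <typename T, int kShift, int kSize>
    struct MiniBitField {
      static constexpr int kNext = kShift + kSize;
      static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static constexpr uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static constexpr T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> kShift);
      }
    };

    // Hypothetical layout mirroring the store-handler fields in this header.
    using KindBits = MiniBitField<int, 0, 4>;
    using DoAccessCheckOnReceiverBits = MiniBitField<bool, KindBits::kNext, 1>;
    using LookupOnReceiverBits =
        MiniBitField<bool, DoAccessCheckOnReceiverBits::kNext, 1>;
    using DescriptorBits = MiniBitField<unsigned, LookupOnReceiverBits::kNext, 10>;

    int main() {
      uint32_t handler = KindBits::encode(2) | LookupOnReceiverBits::encode(true) |
                         DescriptorBits::encode(7u);
      assert(KindBits::decode(handler) == 2);
      assert(LookupOnReceiverBits::decode(handler));
      assert(DescriptorBits::decode(handler) == 7u);
      return 0;
    }

Basing every field on its predecessor's kNext keeps the packing dense and makes inserting a new bit (such as LookupOnReceiverBits here) a one-line change, at the cost of shifting every field declared after it.
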
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index e705d38679..d6fa23611e 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -41,8 +41,7 @@ Address IC::raw_constant_pool() const {
bool IC::IsHandler(Object* object) {
- return (object->IsSmi() && (object != nullptr)) || object->IsTuple2() ||
- object->IsTuple3() || object->IsFixedArrayExact() ||
+ return (object->IsSmi() && (object != nullptr)) || object->IsDataHandler() ||
object->IsWeakCell() || object->IsCode();
}
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 7e3e6556a1..62a2e7cf59 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -255,7 +255,7 @@ bool IC::ShouldRecomputeHandler(Handle<String> name) {
// This is a contextual access, always just update the handler and stay
// monomorphic.
- if (IsLoadGlobalIC()) return true;
+ if (IsGlobalIC()) return true;
// The current map wasn't handled yet. There's no reason to stay monomorphic,
// *unless* we're moving from a deprecated map to its replacement, or
@@ -395,6 +395,11 @@ void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
if (IsLoadGlobalIC()) {
LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
nexus->ConfigureHandlerMode(handler);
+
+ } else if (IsStoreGlobalIC()) {
+ StoreGlobalICNexus* nexus = casted_nexus<StoreGlobalICNexus>();
+ nexus->ConfigureHandlerMode(handler);
+
} else {
// Non-keyed ICs don't track the name explicitly.
if (!is_keyed()) name = Handle<Name>::null();
@@ -408,7 +413,7 @@ void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
void IC::ConfigureVectorState(Handle<Name> name, MapHandles const& maps,
ObjectHandles* handlers) {
- DCHECK(!IsLoadGlobalIC());
+ DCHECK(!IsGlobalIC());
// Non-keyed ICs don't track the name explicitly.
if (!is_keyed()) name = Handle<Name>::null();
nexus()->ConfigurePolymorphic(name, maps, handlers);
@@ -486,10 +491,16 @@ MaybeHandle<Object> LoadGlobalIC::Load(Handle<Name> name) {
return ReferenceError(name);
}
- if (FLAG_use_ic && LoadScriptContextFieldStub::Accepted(&lookup_result)) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadScriptContextFieldStub);
- LoadScriptContextFieldStub stub(isolate(), &lookup_result);
- PatchCache(name, stub.GetCode());
+ if (FLAG_use_ic) {
+ LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
+ if (nexus->ConfigureLexicalVarMode(lookup_result.context_index,
+ lookup_result.slot_index)) {
+ TRACE_HANDLER_STATS(isolate(), LoadGlobalIC_LoadScriptContextField);
+ } else {
+ // The given combination of indices can't be encoded, so use the slow stub.
+ TRACE_HANDLER_STATS(isolate(), LoadGlobalIC_SlowStub);
+ PatchCache(name, slow_stub());
+ }
TRACE_IC("LoadGlobalIC", name);
}
return result;
@@ -623,7 +634,7 @@ void IC::PatchCache(Handle<Name> name, Handle<Object> handler) {
break;
case RECOMPUTE_HANDLER:
case MONOMORPHIC:
- if (IsLoadGlobalIC()) {
+ if (IsGlobalIC()) {
UpdateMonomorphicIC(handler, name);
break;
}
@@ -664,7 +675,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
Handle<Smi> smi_handler = LoadHandler::LoadNonExistent(isolate());
code = LoadHandler::LoadFullChain(isolate(), receiver_map(),
isolate()->factory()->null_value(),
- lookup->name(), smi_handler);
+ smi_handler);
} else {
if (IsLoadGlobalIC()) {
if (lookup->TryLookupCachedProperty()) {
@@ -754,7 +765,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
}
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNonMaskingInterceptorDH);
return LoadHandler::LoadFullChain(isolate(), map, holder_ref,
- lookup->name(), smi_handler);
+ smi_handler);
}
if (receiver_is_holder) {
@@ -765,7 +776,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadInterceptorFromPrototypeDH);
return LoadHandler::LoadFromPrototype(isolate(), map, holder,
- lookup->name(), smi_handler);
+ smi_handler);
}
case LookupIterator::ACCESSOR: {
@@ -831,12 +842,10 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
isolate()->factory()->NewWeakCell(context);
Handle<WeakCell> data_cell = isolate()->factory()->NewWeakCell(
call_optimization.api_call_info());
- Handle<Tuple2> data =
- isolate()->factory()->NewTuple2(context_cell, data_cell, TENURED);
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterFromPrototypeDH);
return LoadHandler::LoadFromPrototype(
- isolate(), map, holder, lookup->name(), smi_handler, data);
+ isolate(), map, holder, smi_handler, data_cell, context_cell);
}
if (holder->HasFastProperties()) {
@@ -851,8 +860,8 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
smi_handler = LoadHandler::LoadGlobal(isolate());
Handle<WeakCell> cell =
isolate()->factory()->NewWeakCell(lookup->GetPropertyCell());
- return LoadHandler::LoadFromPrototype(
- isolate(), map, holder, lookup->name(), smi_handler, cell);
+ return LoadHandler::LoadFromPrototype(isolate(), map, holder,
+ smi_handler, cell);
} else {
smi_handler = LoadHandler::LoadNormal(isolate());
@@ -862,7 +871,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
}
return LoadHandler::LoadFromPrototype(isolate(), map, holder,
- lookup->name(), smi_handler);
+ smi_handler);
}
Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
@@ -882,7 +891,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(),
LoadIC_LoadNativeDataPropertyFromPrototypeDH);
return LoadHandler::LoadFromPrototype(isolate(), map, holder,
- lookup->name(), smi_handler);
+ smi_handler);
}
case LookupIterator::DATA: {
@@ -896,8 +905,8 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
smi_handler = LoadHandler::LoadGlobal(isolate());
Handle<WeakCell> cell =
isolate()->factory()->NewWeakCell(lookup->GetPropertyCell());
- return LoadHandler::LoadFromPrototype(
- isolate(), map, holder, lookup->name(), smi_handler, cell);
+ return LoadHandler::LoadFromPrototype(isolate(), map, holder,
+ smi_handler, cell);
}
smi_handler = LoadHandler::LoadNormal(isolate());
@@ -920,7 +929,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantFromPrototypeDH);
}
return LoadHandler::LoadFromPrototype(isolate(), map, holder,
- lookup->name(), smi_handler);
+ smi_handler);
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadIntegerIndexedExoticDH);
@@ -933,7 +942,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
return smi_handler;
}
return LoadHandler::LoadFromPrototype(isolate(), map, holder_proxy,
- lookup->name(), smi_handler);
+ smi_handler);
}
case LookupIterator::ACCESS_CHECK:
case LookupIterator::NOT_FOUND:
@@ -957,8 +966,6 @@ static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
key = handle(Smi::FromInt(int_value), isolate);
}
}
- } else if (key->IsUndefined(isolate)) {
- key = isolate->factory()->undefined_string();
} else if (key->IsString()) {
key = isolate->factory()->InternalizeString(Handle<String>::cast(key));
}
@@ -1287,15 +1294,13 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
return it->IsCacheableTransition();
}
-MaybeHandle<Object> StoreGlobalIC::Store(Handle<Object> object,
- Handle<Name> name,
+MaybeHandle<Object> StoreGlobalIC::Store(Handle<Name> name,
Handle<Object> value) {
- DCHECK(object->IsJSGlobalObject());
DCHECK(name->IsString());
// Look up in script context table.
Handle<String> str_name = Handle<String>::cast(name);
- Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(object);
+ Handle<JSGlobalObject> global = isolate()->global_object();
Handle<ScriptContextTable> script_contexts(
global->native_context()->script_context_table());
@@ -1304,7 +1309,7 @@ MaybeHandle<Object> StoreGlobalIC::Store(Handle<Object> object,
Handle<Context> script_context = ScriptContextTable::GetContext(
script_contexts, lookup_result.context_index);
if (lookup_result.mode == CONST) {
- return TypeError(MessageTemplate::kConstAssign, object, name);
+ return TypeError(MessageTemplate::kConstAssign, global, name);
}
Handle<Object> previous_value =
@@ -1316,17 +1321,24 @@ MaybeHandle<Object> StoreGlobalIC::Store(Handle<Object> object,
return ReferenceError(name);
}
- if (FLAG_use_ic && StoreScriptContextFieldStub::Accepted(&lookup_result)) {
- TRACE_HANDLER_STATS(isolate(), StoreIC_StoreScriptContextFieldStub);
- StoreScriptContextFieldStub stub(isolate(), &lookup_result);
- PatchCache(name, stub.GetCode());
+ if (FLAG_use_ic) {
+ StoreGlobalICNexus* nexus = casted_nexus<StoreGlobalICNexus>();
+ if (nexus->ConfigureLexicalVarMode(lookup_result.context_index,
+ lookup_result.slot_index)) {
+ TRACE_HANDLER_STATS(isolate(), StoreGlobalIC_StoreScriptContextField);
+ } else {
+ // The given combination of indices can't be encoded, so use the slow stub.
+ TRACE_HANDLER_STATS(isolate(), StoreGlobalIC_SlowStub);
+ PatchCache(name, slow_stub());
+ }
+ TRACE_IC("StoreGlobalIC", name);
}
script_context->set(lookup_result.slot_index, *value);
return value;
}
- return StoreIC::Store(object, name, value);
+ return StoreIC::Store(global, name, value);
}
MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
@@ -1381,7 +1393,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode,
MaybeHandle<Object> cached_handler) {
- if (state() == UNINITIALIZED) {
+ if (state() == UNINITIALIZED && !IsStoreGlobalIC()) {
// This is the first time we execute this inline cache. Set the target to
// the pre monomorphic stub to delay setting the monomorphic state.
TRACE_HANDLER_STATS(isolate(), StoreIC_Premonomorphic);
@@ -1394,6 +1406,17 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
if (!cached_handler.is_null()) {
handler = cached_handler.ToHandleChecked();
} else if (LookupForWrite(lookup, value, store_mode)) {
+ if (IsStoreGlobalIC()) {
+ if (lookup->state() == LookupIterator::DATA &&
+ lookup->GetReceiver().is_identical_to(lookup->GetHolder<Object>())) {
+ DCHECK(lookup->GetReceiver()->IsJSGlobalObject());
+ // Now update the cell in the feedback vector.
+ StoreGlobalICNexus* nexus = casted_nexus<StoreGlobalICNexus>();
+ nexus->ConfigurePropertyCellMode(lookup->GetPropertyCell());
+ TRACE_IC("StoreGlobalIC", lookup->name());
+ return;
+ }
+ }
if (created_new_transition_) {
// The first time a transition is performed, there's a good chance that
// it won't be taken again, so don't bother creating a handler.
@@ -1432,8 +1455,7 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
Handle<WeakCell> cell =
isolate()->factory()->NewWeakCell(lookup->transition_cell());
Handle<Object> handler = StoreHandler::StoreThroughPrototype(
- isolate(), receiver_map(), store_target, lookup->name(),
- smi_handler, cell);
+ isolate(), receiver_map(), store_target, smi_handler, cell);
return handler;
}
// Currently not handled by CompileStoreTransition.
@@ -1457,7 +1479,7 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
Handle<WeakCell> cell = Map::WeakCellForMap(transition);
Handle<Object> handler = StoreHandler::StoreThroughPrototype(
- isolate(), receiver_map(), holder, lookup->name(), smi_handler, cell);
+ isolate(), receiver_map(), holder, smi_handler, cell);
TransitionsAccessor(receiver_map())
.UpdateHandler(*lookup->name(), *handler);
return handler;
@@ -1511,8 +1533,8 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
if (receiver.is_identical_to(holder)) return smi_handler;
TRACE_HANDLER_STATS(isolate(),
StoreIC_StoreNativeDataPropertyOnPrototypeDH);
- return StoreHandler::StoreThroughPrototype(
- isolate(), receiver_map(), holder, lookup->name(), smi_handler);
+ return StoreHandler::StoreThroughPrototype(isolate(), receiver_map(),
+ holder, smi_handler);
} else if (accessors->IsAccessorPair()) {
Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
@@ -1539,12 +1561,10 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
isolate()->factory()->NewWeakCell(context);
Handle<WeakCell> data_cell = isolate()->factory()->NewWeakCell(
call_optimization.api_call_info());
- Handle<Tuple2> data = isolate()->factory()->NewTuple2(
- context_cell, data_cell, TENURED);
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreApiSetterOnPrototypeDH);
return StoreHandler::StoreThroughPrototype(
- isolate(), receiver_map(), holder, lookup->name(), smi_handler,
- data);
+ isolate(), receiver_map(), holder, smi_handler, data_cell,
+ context_cell);
}
TRACE_GENERIC_IC("incompatible receiver");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
@@ -1562,8 +1582,8 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
if (receiver.is_identical_to(holder)) return smi_handler;
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreAccessorOnPrototypeDH);
- return StoreHandler::StoreThroughPrototype(
- isolate(), receiver_map(), holder, lookup->name(), smi_handler);
+ return StoreHandler::StoreThroughPrototype(isolate(), receiver_map(),
+ holder, smi_handler);
}
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
@@ -1614,7 +1634,7 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
Handle<JSReceiver>::cast(lookup->GetReceiver());
Handle<JSProxy> holder = lookup->GetHolder<JSProxy>();
return StoreHandler::StoreProxy(isolate(), receiver_map(), holder,
- receiver, lookup->name());
+ receiver);
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -1788,6 +1808,7 @@ Handle<Object> KeyedStoreIC::StoreElementHandler(
return StoreHandler::StoreProxy(isolate());
}
+ // TODO(ishell): move to StoreHandler::StoreElement().
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub;
@@ -1809,7 +1830,10 @@ Handle<Object> KeyedStoreIC::StoreElementHandler(
Handle<Object> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
if (validity_cell.is_null()) return stub;
- return isolate()->factory()->NewTuple2(validity_cell, stub, TENURED);
+ Handle<StoreHandler> handler = isolate()->factory()->NewStoreHandler(0);
+ handler->set_validity_cell(*validity_cell);
+ handler->set_smi_handler(*stub);
+ return handler;
}
void KeyedStoreIC::StoreElementPolymorphicHandlers(
@@ -2172,10 +2196,12 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
} else if (IsStoreGlobalICKind(kind)) {
- StoreICNexus nexus(vector, vector_slot);
+ DCHECK_EQ(isolate->native_context()->global_proxy(), *receiver);
+ receiver = isolate->global_object();
+ StoreGlobalICNexus nexus(vector, vector_slot);
StoreGlobalIC ic(isolate, &nexus);
ic.UpdateState(receiver, key);
- RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Store(key, value));
} else {
DCHECK(IsKeyedStoreICKind(kind));
KeyedStoreICNexus nexus(vector, vector_slot);
@@ -2185,6 +2211,22 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
}
}
+RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Miss) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ // Runtime functions don't follow the IC's calling convention.
+ Handle<Object> value = args.at(0);
+ Handle<Smi> slot = args.at<Smi>(1);
+ Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+ Handle<Name> key = args.at<Name>(3);
+ FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ StoreGlobalICNexus nexus(vector, vector_slot);
+ StoreGlobalIC ic(isolate, &nexus);
+ Handle<JSGlobalObject> global = isolate->global_object();
+ ic.UpdateState(global, key);
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Store(key, value));
+}
+
RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
@@ -2192,9 +2234,19 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
Handle<Object> value = args.at(0);
Handle<Smi> slot = args.at<Smi>(1);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
- Handle<Object> object = args.at(3);
CONVERT_ARG_HANDLE_CHECKED(String, name, 4);
+#ifdef DEBUG
+ {
+ FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlotKind slot_kind = vector->GetKind(vector_slot);
+ DCHECK(IsStoreGlobalICKind(slot_kind));
+ Handle<Object> receiver = args.at(3);
+ DCHECK(receiver->IsJSGlobalProxy());
+ }
+#endif
+
+ Handle<JSGlobalObject> global = isolate->global_object();
Handle<Context> native_context = isolate->native_context();
Handle<ScriptContextTable> script_contexts(
native_context->script_context_table());
@@ -2205,7 +2257,7 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
script_contexts, lookup_result.context_index);
if (lookup_result.mode == CONST) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kConstAssign, object, name));
+ isolate, NewTypeError(MessageTemplate::kConstAssign, global, name));
}
Handle<Object> previous_value =
@@ -2224,7 +2276,7 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
RETURN_RESULT_OR_FAILURE(
isolate,
- Runtime::SetObjectProperty(isolate, object, name, value, language_mode));
+ Runtime::SetObjectProperty(isolate, global, name, value, language_mode));
}
// Used from ic-<arch>.cc.
@@ -2286,8 +2338,6 @@ RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
RUNTIME_FUNCTION(Runtime_Unreachable) {
UNREACHABLE();
- CHECK(false);
- return isolate->heap()->undefined_value();
}
@@ -2306,23 +2356,18 @@ RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
language_mode));
}
- Handle<AccessorInfo> callback(
+ Handle<AccessorInfo> info(
callback_or_cell->IsWeakCell()
? AccessorInfo::cast(WeakCell::cast(*callback_or_cell)->value())
: AccessorInfo::cast(*callback_or_cell));
- DCHECK(callback->IsCompatibleReceiver(*receiver));
-
- Address setter_address = v8::ToCData<Address>(callback->setter());
- v8::AccessorNameSetterCallback fun =
- FUNCTION_CAST<v8::AccessorNameSetterCallback>(setter_address);
- DCHECK_NOT_NULL(fun);
+ DCHECK(info->IsCompatibleReceiver(*receiver));
ShouldThrow should_throw =
is_sloppy(language_mode) ? kDontThrow : kThrowOnError;
- PropertyCallbackArguments custom_args(isolate, callback->data(), *receiver,
- *holder, should_throw);
- custom_args.Call(fun, name, value);
+ PropertyCallbackArguments arguments(isolate, info->data(), *receiver, *holder,
+ should_throw);
+ arguments.CallAccessorSetter(info, name, value);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return *value;
}
@@ -2344,14 +2389,10 @@ RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
isolate, receiver, Object::ConvertReceiver(isolate, receiver));
}
- InterceptorInfo* interceptor = holder->GetNamedInterceptor();
+ Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor(), isolate);
PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
*holder, kDontThrow);
-
- v8::GenericNamedPropertyGetterCallback getter =
- v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
- interceptor->getter());
- Handle<Object> result = arguments.Call(getter, name);
+ Handle<Object> result = arguments.CallNamedGetter(interceptor, name);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
@@ -2398,16 +2439,24 @@ RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
FeedbackSlot vector_slot = vector->ToSlot(slot->value());
LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
- DCHECK(receiver->HasNamedInterceptor());
- InterceptorInfo* interceptor = receiver->GetNamedInterceptor();
+ // TODO(ishell): Cache interceptor_holder in the store handler like we do
+ // for LoadHandler::kInterceptor case.
+ Handle<JSObject> interceptor_holder = receiver;
+ if (receiver->IsJSGlobalProxy()) {
+ FeedbackSlotKind kind = vector->GetKind(vector_slot);
+ if (IsStoreGlobalICKind(kind)) {
+ interceptor_holder = Handle<JSObject>::cast(isolate->global_object());
+ }
+ }
+ DCHECK(interceptor_holder->HasNamedInterceptor());
+ Handle<InterceptorInfo> interceptor(interceptor_holder->GetNamedInterceptor(),
+ isolate);
+
DCHECK(!interceptor->non_masking());
PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
*receiver, kDontThrow);
- v8::GenericNamedPropertySetterCallback setter =
- v8::ToCData<v8::GenericNamedPropertySetterCallback>(
- interceptor->setter());
- Handle<Object> result = arguments.Call(setter, name, value);
+ Handle<Object> result = arguments.CallNamedSetter(interceptor, name, value);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.is_null()) return *value;
@@ -2435,13 +2484,11 @@ RUNTIME_FUNCTION(Runtime_LoadElementWithInterceptor) {
DCHECK_GE(args.smi_at(1), 0);
uint32_t index = args.smi_at(1);
- InterceptorInfo* interceptor = receiver->GetIndexedInterceptor();
+ Handle<InterceptorInfo> interceptor(receiver->GetIndexedInterceptor(),
+ isolate);
PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
*receiver, kDontThrow);
-
- v8::IndexedPropertyGetterCallback getter =
- v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
- Handle<Object> result = arguments.Call(getter, index);
+ Handle<Object> result = arguments.CallIndexedGetter(interceptor, index);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
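
In the interceptor and accessor hunks above, the runtime stubs no longer extract the raw C callback themselves via v8::ToCData and FUNCTION_CAST; they hand the handlified InterceptorInfo or AccessorInfo to a typed helper on PropertyCallbackArguments (CallAccessorSetter, CallNamedGetter, CallNamedSetter, CallIndexedGetter). A rough sketch of that shape, using made-up types rather than V8's:

    #include <functional>
    #include <optional>
    #include <string>

    // Hypothetical stand-in for an interceptor descriptor.
    struct InterceptorInfo {
      std::function<std::optional<int>(const std::string&)> named_getter;
    };

    // Hypothetical stand-in for the argument builder: it owns the
    // "unwrap the callback and invoke it" step, so every caller only
    // passes the interceptor and the key.
    class PropertyCallbackArguments {
     public:
      std::optional<int> CallNamedGetter(const InterceptorInfo& info,
                                         const std::string& name) {
        if (!info.named_getter) return std::nullopt;
        return info.named_getter(name);
      }
    };

Centralizing the unwrap-and-invoke step in one helper keeps the callback ABI details out of each runtime function that needs to call an interceptor.
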
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index acbfccd4c6..a63202395b 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -119,6 +119,7 @@ class IC {
bool IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map);
void PatchCache(Handle<Name> name, Handle<Object> code);
FeedbackSlotKind kind() const { return kind_; }
+ bool IsGlobalIC() const { return IsLoadGlobalIC() || IsStoreGlobalIC(); }
bool IsLoadIC() const { return IsLoadICKind(kind_); }
bool IsLoadGlobalIC() const { return IsLoadGlobalICKind(kind_); }
bool IsKeyedLoadIC() const { return IsKeyedLoadICKind(kind_); }
@@ -339,8 +340,7 @@ class StoreGlobalIC : public StoreIC {
StoreGlobalIC(Isolate* isolate, FeedbackNexus* nexus)
: StoreIC(isolate, nexus) {}
- MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Object> object,
- Handle<Name> name,
+ MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Name> name,
Handle<Object> value);
protected:
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 4263dd8552..b9a11c2ec7 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -751,8 +751,8 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
Label stub_cache(this), fast_properties(this), dictionary_properties(this),
accessor(this), readonly(this);
Node* bitfield3 = LoadMapBitField3(receiver_map);
- Branch(IsSetWord32<Map::DictionaryMap>(bitfield3), &dictionary_properties,
- &fast_properties);
+ Branch(IsSetWord32<Map::IsDictionaryMapBit>(bitfield3),
+ &dictionary_properties, &fast_properties);
BIND(&fast_properties);
{
@@ -795,38 +795,22 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
{
Comment("lookup transition");
VARIABLE(var_handler, MachineRepresentation::kTagged);
- Label tuple3(this), fixedarray(this), found_handler(this, &var_handler);
+ Label check_key(this), found_handler(this, &var_handler);
Node* maybe_handler =
LoadObjectField(receiver_map, Map::kTransitionsOrPrototypeInfoOffset);
GotoIf(TaggedIsSmi(maybe_handler), notfound);
- Node* handler_map = LoadMap(maybe_handler);
- GotoIf(WordEqual(handler_map, Tuple3MapConstant()), &tuple3);
- GotoIf(WordEqual(handler_map, FixedArrayMapConstant()), &fixedarray);
+ GotoIf(HasInstanceType(maybe_handler, STORE_HANDLER_TYPE), &check_key);
// TODO(jkummerow): Consider implementing TransitionArray search.
Goto(notfound);
- VARIABLE(var_transition_cell, MachineRepresentation::kTagged);
- Label check_key(this, &var_transition_cell);
- BIND(&tuple3);
- {
- var_transition_cell.Bind(
- LoadObjectField(maybe_handler, StoreHandler::kDataOffset));
- Goto(&check_key);
- }
-
- BIND(&fixedarray);
- {
- var_transition_cell.Bind(
- LoadFixedArrayElement(maybe_handler, StoreHandler::kDataIndex));
- Goto(&check_key);
- }
-
BIND(&check_key);
{
- Node* transition = LoadWeakCellValue(var_transition_cell.value(), slow);
+ Node* transition_cell =
+ LoadObjectField(maybe_handler, StoreHandler::kData1Offset);
+ Node* transition = LoadWeakCellValue(transition_cell, slow);
Node* transition_bitfield3 = LoadMapBitField3(transition);
- GotoIf(IsSetWord32<Map::Deprecated>(transition_bitfield3), slow);
+ GotoIf(IsSetWord32<Map::IsDeprecatedBit>(transition_bitfield3), slow);
Node* nof =
DecodeWord32<Map::NumberOfOwnDescriptorsBits>(transition_bitfield3);
Node* last_added = Int32Sub(nof, Int32Constant(1));
@@ -840,7 +824,8 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&found_handler);
{
Comment("KeyedStoreGeneric found transition handler");
- HandleStoreICHandlerCase(p, var_handler.value(), notfound);
+ HandleStoreICHandlerCase(p, var_handler.value(), notfound,
+ ICMode::kNonGlobalIC);
}
}
}
@@ -882,16 +867,16 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
{
CheckForAssociatedProtector(p->name, slow);
Label extensible(this);
- GotoIf(IsPrivateSymbol(p->name), &extensible);
Node* bitfield2 = LoadMapBitField2(receiver_map);
- Branch(IsSetWord32(bitfield2, 1 << Map::kIsExtensible), &extensible,
- slow);
+ GotoIf(IsPrivateSymbol(p->name), &extensible);
+ Branch(IsSetWord32<Map::IsExtensibleBit>(bitfield2), &extensible, slow);
BIND(&extensible);
LookupPropertyOnPrototypeChain(receiver_map, p->name, &accessor,
&var_accessor_pair, &var_accessor_holder,
&readonly, slow);
Label add_dictionary_property_slow(this);
+ InvalidateValidityCellIfPrototype(receiver_map, bitfield2);
Add<NameDictionary>(properties, p->name, p->value,
&add_dictionary_property_slow);
Return(p->value);
@@ -958,7 +943,8 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&found_handler);
{
Comment("KeyedStoreGeneric found handler");
- HandleStoreICHandlerCase(p, var_handler.value(), &stub_cache_miss);
+ HandleStoreICHandlerCase(p, var_handler.value(), &stub_cache_miss,
+ ICMode::kNonGlobalIC);
}
BIND(&stub_cache_miss);
{
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index ecdf8c83e1..927c7c6f27 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -39,7 +39,7 @@ int StubCache::PrimaryOffset(Name* name, Map* map) {
uint32_t map_low32bits =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
// Base the offset on a simple combination of name and map.
- uint32_t key = (map_low32bits + field) ^ kPrimaryMagic;
+ uint32_t key = map_low32bits + field;
return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}
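
With kPrimaryMagic gone, the primary offset is simply the masked sum of the map bits and the name-derived field. A worked sketch of the arithmetic follows; the constants are illustrative, not the real table sizes or shifts.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kCacheIndexShift = 3;          // assumed entry-size shift
      const uint32_t kPrimaryTableSize = 1u << 11;  // assumed table size
      uint32_t map_low32bits = 0x12345678u;         // low bits of the Map pointer
      uint32_t field = 0x00ABCDEFu;                 // hash-derived bits of the Name
      // The XOR with kPrimaryMagic is dropped; the key is now a plain sum.
      uint32_t key = map_low32bits + field;
      uint32_t offset = key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
      std::printf("primary offset: 0x%x\n", offset);
      return 0;
    }
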
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index 4b3144b9ad..cd081edfb2 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -84,8 +84,7 @@ class StubCache {
static const int kSecondaryTableBits = 9;
static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
- // Some magic number used in primary and secondary hash computations.
- static const int kPrimaryMagic = 0x3d532433;
+ // Some magic number used in the secondary hash computation.
static const int kSecondaryMagic = 0xb16ca6e5;
static int PrimaryOffsetForTesting(Name* name, Map* map) {
diff --git a/deps/v8/src/icu_util.cc b/deps/v8/src/icu_util.cc
index 291cce6fe6..d14c673b62 100644
--- a/deps/v8/src/icu_util.cc
+++ b/deps/v8/src/icu_util.cc
@@ -81,6 +81,8 @@ bool InitializeICU(const char* icu_data_file) {
UErrorCode err = U_ZERO_ERROR;
udata_setCommonData(reinterpret_cast<void*>(addr), &err);
+ // Never try to load ICU data from files.
+ udata_setFileAccess(UDATA_ONLY_PACKAGES, &err);
return err == U_ZERO_ERROR;
#elif ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_STATIC
// Mac/Linux bundle the ICU data in.
@@ -110,6 +112,8 @@ bool InitializeICU(const char* icu_data_file) {
UErrorCode err = U_ZERO_ERROR;
udata_setCommonData(reinterpret_cast<void*>(g_icu_data_ptr), &err);
+ // Never try to load ICU data from files.
+ udata_setFileAccess(UDATA_ONLY_PACKAGES, &err);
return err == U_ZERO_ERROR;
#endif
#endif
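
Both the data-file and static configurations now pair udata_setCommonData with udata_setFileAccess(UDATA_ONLY_PACKAGES, ...), so ICU is pinned to the in-memory data and never probes the filesystem. A minimal usage sketch of the two ICU4C calls (error handling trimmed; the data pointer is assumed to be a mapped icudtl.dat or a statically linked blob):

    #include <unicode/udata.h>
    #include <unicode/utypes.h>

    bool InitIcuFromMemory(const void* icu_data_blob) {
      UErrorCode err = U_ZERO_ERROR;
      // Register the common data blob with ICU.
      udata_setCommonData(icu_data_blob, &err);
      if (U_FAILURE(err)) return false;
      // Restrict ICU to the registered package; never load data from files.
      udata_setFileAccess(UDATA_ONLY_PACKAGES, &err);
      return U_SUCCESS(err);
    }
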
diff --git a/deps/v8/src/inspector/BUILD.gn b/deps/v8/src/inspector/BUILD.gn
index 2ebf561135..699b1bcbd4 100644
--- a/deps/v8/src/inspector/BUILD.gn
+++ b/deps/v8/src/inspector/BUILD.gn
@@ -79,17 +79,6 @@ action("inspector_injected_script") {
config("inspector_config") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
- cflags = []
- if (is_win) {
- cflags += [
- "/wd4267", # Truncation from size_t to int.
- "/wd4305", # Truncation from 'type1' to 'type2'.
- "/wd4324", # Struct padded due to declspec(align).
- "/wd4714", # Function marked forceinline not inlined.
- "/wd4800", # Value forced to bool.
- "/wd4996", # Deprecated function call.
- ]
- }
if (is_component_build) {
defines = [ "BUILDING_V8_SHARED" ]
}
diff --git a/deps/v8/src/inspector/OWNERS b/deps/v8/src/inspector/OWNERS
index db3c906262..3cfeff35c4 100644
--- a/deps/v8/src/inspector/OWNERS
+++ b/deps/v8/src/inspector/OWNERS
@@ -12,5 +12,8 @@ yangguo@chromium.org
per-file js_protocol.json=set noparent
per-file js_protocol.json=dgozman@chromium.org
per-file js_protocol.json=pfeldman@chromium.org
+per-file js_protocol.pdl=set noparent
+per-file js_protocol.pdl=dgozman@chromium.org
+per-file js_protocol.pdl=pfeldman@chromium.org
# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/inspector/injected-script-source.js b/deps/v8/src/inspector/injected-script-source.js
index dd9067ca96..0849d44202 100644
--- a/deps/v8/src/inspector/injected-script-source.js
+++ b/deps/v8/src/inspector/injected-script-source.js
@@ -460,6 +460,10 @@ InjectedScript.prototype = {
if (InjectedScriptHost.subtype(o) === "proxy")
continue;
+ var typedArrays = subtype === "arraybuffer" ? InjectedScriptHost.typedArrayProperties(o) || [] : [];
+ for (var i = 0; i < typedArrays.length; i += 2)
+ addPropertyIfNeeded(descriptors, { name: typedArrays[i], value: typedArrays[i + 1], isOwn: true, enumerable: false, configurable: false, __proto__: null });
+
try {
if (skipGetOwnPropertyNames && o === object) {
if (!process(o, undefined, o.length))
@@ -586,15 +590,21 @@ InjectedScript.prototype = {
if (subtype === "node") {
var description = "";
- if (obj.nodeName)
- description = obj.nodeName.toLowerCase();
- else if (obj.constructor)
- description = obj.constructor.name.toLowerCase();
+ var nodeName = InjectedScriptHost.getProperty(obj, "nodeName");
+ if (nodeName) {
+ description = nodeName.toLowerCase();
+ } else {
+ var constructor = InjectedScriptHost.getProperty(obj, "constructor");
+ if (constructor)
+ description = (InjectedScriptHost.getProperty(constructor, "name") || "").toLowerCase();
+ }
- switch (obj.nodeType) {
+ var nodeType = InjectedScriptHost.getProperty(obj, "nodeType");
+ switch (nodeType) {
case 1 /* Node.ELEMENT_NODE */:
- description += obj.id ? "#" + obj.id : "";
- var className = obj.className;
+ var id = InjectedScriptHost.getProperty(obj, "id");
+ description += id ? "#" + id : "";
+ var className = InjectedScriptHost.getProperty(obj, "className");
description += (className && typeof className === "string") ? "." + className.trim().replace(/\s+/g, ".") : "";
break;
case 10 /*Node.DOCUMENT_TYPE_NODE */:
@@ -929,6 +939,10 @@ InjectedScript.RemoteObject.prototype = {
if ((subtype === "map" || subtype === "set") && descriptor.name === "size")
return true;
+ // Ignore ArrayBuffer previews
+ if (subtype === 'arraybuffer' && (descriptor.name === "[[Int8Array]]" || descriptor.name === "[[Uint8Array]]" || descriptor.name === "[[Int16Array]]" || descriptor.name === "[[Int32Array]]"))
+ return true;
+
// Never preview prototype properties.
if (!descriptor.isOwn)
return true;
diff --git a/deps/v8/src/inspector/injected_script_externs.js b/deps/v8/src/inspector/injected_script_externs.js
index 9c5555b624..d293b8547d 100644
--- a/deps/v8/src/inspector/injected_script_externs.js
+++ b/deps/v8/src/inspector/injected_script_externs.js
@@ -108,6 +108,12 @@ InjectedScriptHostClass.prototype.getOwnPropertySymbols = function(obj) {}
*/
InjectedScriptHostClass.prototype.nativeAccessorDescriptor = function(obj, name) {}
+/**
+ * @param {!Object} arrayBuffer
+ * @return {Array<Object>|undefined}
+ */
+InjectedScriptHostClass.prototype.typedArrayProperties = function(arrayBuffer) {}
+
/** @type {!InjectedScriptHostClass} */
var InjectedScriptHost;
/** @type {!Window} */
diff --git a/deps/v8/src/inspector/js_protocol.json b/deps/v8/src/inspector/js_protocol.json
index ea573d11a6..a0f7fcd7ed 100644
--- a/deps/v8/src/inspector/js_protocol.json
+++ b/deps/v8/src/inspector/js_protocol.json
@@ -1,1205 +1,2966 @@
{
- "version": { "major": "1", "minor": "3" },
- "domains": [
- {
- "domain": "Schema",
- "description": "This domain is deprecated.",
- "deprecated": true,
- "types": [
- {
- "id": "Domain",
- "type": "object",
- "description": "Description of the protocol domain.",
- "properties": [
- { "name": "name", "type": "string", "description": "Domain name." },
- { "name": "version", "type": "string", "description": "Domain version." }
- ]
- }
- ],
- "commands": [
- {
- "name": "getDomains",
- "description": "Returns supported domains.",
- "handlers": ["browser", "renderer"],
- "returns": [
- { "name": "domains", "type": "array", "items": { "$ref": "Domain" }, "description": "List of supported domains." }
- ]
- }
- ]
- },
- {
- "domain": "Runtime",
- "description": "Runtime domain exposes JavaScript runtime by means of remote evaluation and mirror objects. Evaluation results are returned as mirror object that expose object type, string representation and unique identifier that can be used for further object reference. Original objects are maintained in memory unless they are either explicitly released or are released along with the other objects in their object group.",
- "types": [
- {
- "id": "ScriptId",
- "type": "string",
- "description": "Unique script identifier."
- },
- {
- "id": "RemoteObjectId",
- "type": "string",
- "description": "Unique object identifier."
- },
- {
- "id": "UnserializableValue",
- "type": "string",
- "enum": ["Infinity", "NaN", "-Infinity", "-0"],
- "description": "Primitive value which cannot be JSON-stringified."
- },
- {
- "id": "RemoteObject",
- "type": "object",
- "description": "Mirror object referencing original JavaScript object.",
- "properties": [
- { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
- { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "weakmap", "weakset", "iterator", "generator", "error", "proxy", "promise", "typedarray"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
- { "name": "className", "type": "string", "optional": true, "description": "Object class (constructor) name. Specified for <code>object</code> type values only." },
- { "name": "value", "type": "any", "optional": true, "description": "Remote object value in case of primitive values or JSON values (if it was requested)." },
- { "name": "unserializableValue", "$ref": "UnserializableValue", "optional": true, "description": "Primitive value which can not be JSON-stringified does not have <code>value</code>, but gets this property." },
- { "name": "description", "type": "string", "optional": true, "description": "String representation of the object." },
- { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Unique object identifier (for non-primitive values)." },
- { "name": "preview", "$ref": "ObjectPreview", "optional": true, "description": "Preview containing abbreviated property values. Specified for <code>object</code> type values only.", "experimental": true },
- { "name": "customPreview", "$ref": "CustomPreview", "optional": true, "experimental": true}
- ]
- },
- {
- "id": "CustomPreview",
- "type": "object",
- "experimental": true,
- "properties": [
- { "name": "header", "type": "string"},
- { "name": "hasBody", "type": "boolean"},
- { "name": "formatterObjectId", "$ref": "RemoteObjectId"},
- { "name": "bindRemoteObjectFunctionId", "$ref": "RemoteObjectId" },
- { "name": "configObjectId", "$ref": "RemoteObjectId", "optional": true }
- ]
- },
- {
- "id": "ObjectPreview",
- "type": "object",
- "experimental": true,
- "description": "Object containing abbreviated remote object value.",
- "properties": [
- { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
- { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "weakmap", "weakset", "iterator", "generator", "error"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
- { "name": "description", "type": "string", "optional": true, "description": "String representation of the object." },
- { "name": "overflow", "type": "boolean", "description": "True iff some of the properties or entries of the original object did not fit." },
- { "name": "properties", "type": "array", "items": { "$ref": "PropertyPreview" }, "description": "List of the properties." },
- { "name": "entries", "type": "array", "items": { "$ref": "EntryPreview" }, "optional": true, "description": "List of the entries. Specified for <code>map</code> and <code>set</code> subtype values only." }
- ]
- },
- {
- "id": "PropertyPreview",
- "type": "object",
- "experimental": true,
- "properties": [
- { "name": "name", "type": "string", "description": "Property name." },
- { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol", "accessor"], "description": "Object type. Accessor means that the property itself is an accessor property." },
- { "name": "value", "type": "string", "optional": true, "description": "User-friendly property value string." },
- { "name": "valuePreview", "$ref": "ObjectPreview", "optional": true, "description": "Nested value preview." },
- { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "weakmap", "weakset", "iterator", "generator", "error"], "description": "Object subtype hint. Specified for <code>object</code> type values only." }
- ]
- },
- {
- "id": "EntryPreview",
- "type": "object",
- "experimental": true,
- "properties": [
- { "name": "key", "$ref": "ObjectPreview", "optional": true, "description": "Preview of the key. Specified for map-like collection entries." },
- { "name": "value", "$ref": "ObjectPreview", "description": "Preview of the value." }
- ]
- },
- {
- "id": "PropertyDescriptor",
- "type": "object",
- "description": "Object property descriptor.",
- "properties": [
- { "name": "name", "type": "string", "description": "Property name or symbol description." },
- { "name": "value", "$ref": "RemoteObject", "optional": true, "description": "The value associated with the property." },
- { "name": "writable", "type": "boolean", "optional": true, "description": "True if the value associated with the property may be changed (data descriptors only)." },
- { "name": "get", "$ref": "RemoteObject", "optional": true, "description": "A function which serves as a getter for the property, or <code>undefined</code> if there is no getter (accessor descriptors only)." },
- { "name": "set", "$ref": "RemoteObject", "optional": true, "description": "A function which serves as a setter for the property, or <code>undefined</code> if there is no setter (accessor descriptors only)." },
- { "name": "configurable", "type": "boolean", "description": "True if the type of this property descriptor may be changed and if the property may be deleted from the corresponding object." },
- { "name": "enumerable", "type": "boolean", "description": "True if this property shows up during enumeration of the properties on the corresponding object." },
- { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the result was thrown during the evaluation." },
- { "name": "isOwn", "optional": true, "type": "boolean", "description": "True if the property is owned for the object." },
- { "name": "symbol", "$ref": "RemoteObject", "optional": true, "description": "Property symbol object, if the property is of the <code>symbol</code> type." }
- ]
- },
- {
- "id": "InternalPropertyDescriptor",
- "type": "object",
- "description": "Object internal property descriptor. This property isn't normally visible in JavaScript code.",
- "properties": [
- { "name": "name", "type": "string", "description": "Conventional property name." },
- { "name": "value", "$ref": "RemoteObject", "optional": true, "description": "The value associated with the property." }
- ]
- },
- {
- "id": "CallArgument",
- "type": "object",
- "description": "Represents function call argument. Either remote object id <code>objectId</code>, primitive <code>value</code>, unserializable primitive value or neither of (for undefined) them should be specified.",
- "properties": [
- { "name": "value", "type": "any", "optional": true, "description": "Primitive value or serializable javascript object." },
- { "name": "unserializableValue", "$ref": "UnserializableValue", "optional": true, "description": "Primitive value which can not be JSON-stringified." },
- { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Remote object handle." }
- ]
- },
- {
- "id": "ExecutionContextId",
- "type": "integer",
- "description": "Id of an execution context."
- },
- {
- "id": "ExecutionContextDescription",
- "type": "object",
- "description": "Description of an isolated world.",
- "properties": [
- { "name": "id", "$ref": "ExecutionContextId", "description": "Unique id of the execution context. It can be used to specify in which execution context script evaluation should be performed." },
- { "name": "origin", "type": "string", "description": "Execution context origin." },
- { "name": "name", "type": "string", "description": "Human readable name describing given context." },
- { "name": "auxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." }
- ]
- },
- {
- "id": "ExceptionDetails",
- "type": "object",
- "description": "Detailed information about exception (or error) that was thrown during script compilation or execution.",
- "properties": [
- { "name": "exceptionId", "type": "integer", "description": "Exception id." },
- { "name": "text", "type": "string", "description": "Exception text, which should be used together with exception object when available." },
- { "name": "lineNumber", "type": "integer", "description": "Line number of the exception location (0-based)." },
- { "name": "columnNumber", "type": "integer", "description": "Column number of the exception location (0-based)." },
- { "name": "scriptId", "$ref": "ScriptId", "optional": true, "description": "Script ID of the exception location." },
- { "name": "url", "type": "string", "optional": true, "description": "URL of the exception location, to be used when the script was not reported." },
- { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "JavaScript stack trace if available." },
- { "name": "exception", "$ref": "RemoteObject", "optional": true, "description": "Exception object if available." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Identifier of the context where exception happened." }
- ]
- },
- {
- "id": "Timestamp",
- "type": "number",
- "description": "Number of milliseconds since epoch."
- },
- {
- "id": "CallFrame",
- "type": "object",
- "description": "Stack entry for runtime errors and assertions.",
- "properties": [
- { "name": "functionName", "type": "string", "description": "JavaScript function name." },
- { "name": "scriptId", "$ref": "ScriptId", "description": "JavaScript script id." },
- { "name": "url", "type": "string", "description": "JavaScript script name or url." },
- { "name": "lineNumber", "type": "integer", "description": "JavaScript script line number (0-based)." },
- { "name": "columnNumber", "type": "integer", "description": "JavaScript script column number (0-based)." }
- ]
- },
- {
- "id": "StackTrace",
- "type": "object",
- "description": "Call frames for assertions or error messages.",
- "properties": [
- { "name": "description", "type": "string", "optional": true, "description": "String label of this stack trace. For async traces this may be a name of the function that initiated the async call." },
- { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "JavaScript function name." },
- { "name": "parent", "$ref": "StackTrace", "optional": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." },
- { "name": "parentId", "$ref": "StackTraceId", "optional": true, "experimental": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." }
- ]
- },
- {
- "id": "UniqueDebuggerId",
- "type": "string",
- "description": "Unique identifier of current debugger.",
- "experimental": true
- },
- {
- "id": "StackTraceId",
- "type": "object",
- "description": "If <code>debuggerId</code> is set stack trace comes from another debugger and can be resolved there. This allows to track cross-debugger calls. See <code>Runtime.StackTrace</code> and <code>Debugger.paused</code> for usages.",
- "properties": [
- { "name": "id", "type": "string" },
- { "name": "debuggerId", "$ref": "UniqueDebuggerId", "optional": true }
- ],
- "experimental": true
- }
- ],
- "commands": [
- {
- "name": "evaluate",
- "parameters": [
- { "name": "expression", "type": "string", "description": "Expression to evaluate." },
- { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
- { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." },
- { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
- { "name": "contextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform evaluation. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
- { "name": "userGesture", "type": "boolean", "optional": true, "description": "Whether execution should be treated as initiated by user in the UI." },
- { "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved." }
- ],
- "returns": [
- { "name": "result", "$ref": "RemoteObject", "description": "Evaluation result." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Evaluates expression on global object."
- },
- {
- "name": "awaitPromise",
- "parameters": [
- { "name": "promiseObjectId", "$ref": "RemoteObjectId", "description": "Identifier of the promise." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "description": "Whether preview should be generated for the result." }
- ],
- "returns": [
- { "name": "result", "$ref": "RemoteObject", "description": "Promise result. Will contain rejected value if promise was rejected." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details if stack strace is available."}
- ],
- "description": "Add handler to promise with given promise object id."
- },
- {
- "name": "callFunctionOn",
- "parameters": [
- { "name": "functionDeclaration", "type": "string", "description": "Declaration of the function to call." },
- { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Identifier of the object to call function on. Either objectId or executionContextId should be specified." },
- { "name": "arguments", "type": "array", "items": { "$ref": "CallArgument", "description": "Call argument." }, "optional": true, "description": "Call arguments. All call arguments must belong to the same JavaScript world as the target object." },
- { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
- { "name": "userGesture", "type": "boolean", "optional": true, "description": "Whether execution should be treated as initiated by user in the UI." },
- { "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies execution context which global object will be used to call function on. Either executionContextId or objectId should be specified." },
- { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects. If objectGroup is not specified and objectId is, objectGroup will be inherited from object." }
- ],
- "returns": [
- { "name": "result", "$ref": "RemoteObject", "description": "Call result." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Calls function with given declaration on the given object. Object group of the result is inherited from the target object."
- },
- {
- "name": "getProperties",
- "parameters": [
- { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to return properties for." },
- { "name": "ownProperties", "optional": true, "type": "boolean", "description": "If true, returns properties belonging only to the element itself, not to its prototype chain." },
- { "name": "accessorPropertiesOnly", "optional": true, "type": "boolean", "description": "If true, returns accessor properties (with getter/setter) only; internal properties are not returned either.", "experimental": true },
- { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the results." }
- ],
- "returns": [
- { "name": "result", "type": "array", "items": { "$ref": "PropertyDescriptor" }, "description": "Object properties." },
- { "name": "internalProperties", "optional": true, "type": "array", "items": { "$ref": "InternalPropertyDescriptor" }, "description": "Internal object properties (only of the element itself)." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Returns properties of a given object. Object group of the result is inherited from the target object."
- },
- {
- "name": "releaseObject",
- "parameters": [
- { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to release." }
- ],
- "description": "Releases remote object with given id."
- },
- {
- "name": "releaseObjectGroup",
- "parameters": [
- { "name": "objectGroup", "type": "string", "description": "Symbolic object group name." }
- ],
- "description": "Releases all remote objects that belong to a given group."
- },
- {
- "name": "runIfWaitingForDebugger",
- "description": "Tells inspected instance to run if it was waiting for debugger to attach."
- },
- {
- "name": "enable",
- "description": "Enables reporting of execution contexts creation by means of <code>executionContextCreated</code> event. When the reporting gets enabled the event will be sent immediately for each existing execution context."
- },
- {
- "name": "disable",
- "description": "Disables reporting of execution contexts creation."
- },
- {
- "name": "discardConsoleEntries",
- "description": "Discards collected exceptions and console API calls."
- },
- {
- "name": "setCustomObjectFormatterEnabled",
- "parameters": [
- {
- "name": "enabled",
- "type": "boolean"
- }
- ],
- "experimental": true
- },
- {
- "name": "compileScript",
- "parameters": [
- { "name": "expression", "type": "string", "description": "Expression to compile." },
- { "name": "sourceURL", "type": "string", "description": "Source url to be set for the script." },
- { "name": "persistScript", "type": "boolean", "description": "Specifies whether the compiled script should be persisted." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." }
- ],
- "returns": [
- { "name": "scriptId", "$ref": "ScriptId", "optional": true, "description": "Id of the script." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Compiles expression."
- },
- {
- "name": "runScript",
- "parameters": [
- { "name": "scriptId", "$ref": "ScriptId", "description": "Id of the script to run." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
- { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
- { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
- { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "description": "Whether preview should be generated for the result." },
- { "name": "awaitPromise", "type": "boolean", "optional": true, "description": "Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved." }
- ],
- "returns": [
- { "name": "result", "$ref": "RemoteObject", "description": "Run result." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Runs script with given id in a given context."
- },
- {
- "name": "queryObjects",
- "parameters": [
- { "name": "prototypeObjectId", "$ref": "RemoteObjectId", "description": "Identifier of the prototype to return objects for." }
- ],
- "returns": [
- { "name": "objects", "$ref": "RemoteObject", "description": "Array with objects." }
- ]
- },
- {
- "name": "globalLexicalScopeNames",
- "parameters": [
- { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to lookup global scope variables." }
- ],
- "returns": [
- { "name": "names", "type": "array", "items": { "type": "string" } }
- ],
- "description": "Returns all let, const and class variables from global scope."
- }
- ],
- "events": [
- {
- "name": "executionContextCreated",
- "parameters": [
- { "name": "context", "$ref": "ExecutionContextDescription", "description": "A newly created execution context." }
- ],
- "description": "Issued when new execution context is created."
- },
- {
- "name": "executionContextDestroyed",
- "parameters": [
- { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Id of the destroyed context" }
- ],
- "description": "Issued when execution context is destroyed."
- },
- {
- "name": "executionContextsCleared",
-                "description": "Issued when all executionContexts were cleared in browser."
- },
- {
- "name": "exceptionThrown",
- "description": "Issued when exception was thrown and unhandled.",
- "parameters": [
- { "name": "timestamp", "$ref": "Timestamp", "description": "Timestamp of the exception." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails" }
- ]
- },
- {
- "name": "exceptionRevoked",
- "description": "Issued when unhandled exception was revoked.",
- "parameters": [
- { "name": "reason", "type": "string", "description": "Reason describing why exception was revoked." },
- { "name": "exceptionId", "type": "integer", "description": "The id of revoked exception, as reported in <code>exceptionThrown</code>." }
- ]
- },
- {
- "name": "consoleAPICalled",
- "description": "Issued when console API was called.",
- "parameters": [
- { "name": "type", "type": "string", "enum": ["log", "debug", "info", "error", "warning", "dir", "dirxml", "table", "trace", "clear", "startGroup", "startGroupCollapsed", "endGroup", "assert", "profile", "profileEnd", "count", "timeEnd"], "description": "Type of the call." },
- { "name": "args", "type": "array", "items": { "$ref": "RemoteObject" }, "description": "Call arguments." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Identifier of the context where the call was made." },
- { "name": "timestamp", "$ref": "Timestamp", "description": "Call timestamp." },
- { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "Stack trace captured when the call was made." },
- { "name": "context", "type": "string", "optional": true, "experimental": true, "description": "Console context descriptor for calls on non-default console context (not console.*): 'anonymous#unique-logger-id' for call on unnamed context, 'name#unique-logger-id' for call on named context." }
- ]
- },
- {
- "name": "inspectRequested",
- "description": "Issued when object should be inspected (for example, as a result of inspect() command line API call).",
- "parameters": [
- { "name": "object", "$ref": "RemoteObject" },
- { "name": "hints", "type": "object" }
- ]
- }
- ]
- },
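
For orientation, a minimal sketch of driving the Runtime commands above (evaluate with awaitPromise, then releaseObjectGroup) from Node's built-in inspector session; the evaluated expression and the 'example' object group name are purely illustrative, and the snippet assumes Node.js with its inspector TypeScript typings:

import * as inspector from 'inspector';

const session = new inspector.Session();
session.connect(); // attach to the V8 inspector of the current process

// Runtime.evaluate with awaitPromise: the response is sent once the awaited promise settles.
session.post('Runtime.evaluate', {
  expression: 'Promise.resolve(21 * 2)',   // illustrative expression
  awaitPromise: true,
  returnByValue: true,
  objectGroup: 'example'                   // illustrative group name, released below
}, (err, res) => {
  if (err) throw err;
  if (res.exceptionDetails) {
    console.error('evaluation threw:', res.exceptionDetails.text);
  } else {
    console.log('evaluated value:', res.result.value); // 42
  }
  // Release every remote object created under the group in one call.
  session.post('Runtime.releaseObjectGroup', { objectGroup: 'example' });
});
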
- {
- "domain": "Debugger",
- "description": "Debugger domain exposes JavaScript debugging capabilities. It allows setting and removing breakpoints, stepping through execution, exploring stack traces, etc.",
- "dependencies": ["Runtime"],
- "types": [
- {
- "id": "BreakpointId",
- "type": "string",
- "description": "Breakpoint identifier."
- },
- {
- "id": "CallFrameId",
- "type": "string",
- "description": "Call frame identifier."
- },
- {
- "id": "Location",
- "type": "object",
- "properties": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Script identifier as reported in the <code>Debugger.scriptParsed</code>." },
- { "name": "lineNumber", "type": "integer", "description": "Line number in the script (0-based)." },
- { "name": "columnNumber", "type": "integer", "optional": true, "description": "Column number in the script (0-based)." }
- ],
- "description": "Location in the source code."
- },
- {
- "id": "ScriptPosition",
- "experimental": true,
- "type": "object",
- "properties": [
- { "name": "lineNumber", "type": "integer" },
- { "name": "columnNumber", "type": "integer" }
- ],
- "description": "Location in the source code."
- },
- {
- "id": "CallFrame",
- "type": "object",
- "properties": [
- { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier. This identifier is only valid while the virtual machine is paused." },
- { "name": "functionName", "type": "string", "description": "Name of the JavaScript function called on this call frame." },
- { "name": "functionLocation", "$ref": "Location", "optional": true, "description": "Location in the source code." },
- { "name": "location", "$ref": "Location", "description": "Location in the source code." },
- { "name": "url", "type": "string", "description": "JavaScript script name or url." },
- { "name": "scopeChain", "type": "array", "items": { "$ref": "Scope" }, "description": "Scope chain for this call frame." },
- { "name": "this", "$ref": "Runtime.RemoteObject", "description": "<code>this</code> object for this call frame." },
- { "name": "returnValue", "$ref": "Runtime.RemoteObject", "optional": true, "description": "The value being returned, if the function is at return point." }
- ],
- "description": "JavaScript call frame. Array of call frames form the call stack."
- },
- {
- "id": "Scope",
- "type": "object",
- "properties": [
- { "name": "type", "type": "string", "enum": ["global", "local", "with", "closure", "catch", "block", "script", "eval", "module"], "description": "Scope type." },
- { "name": "object", "$ref": "Runtime.RemoteObject", "description": "Object representing the scope. For <code>global</code> and <code>with</code> scopes it represents the actual object; for the rest of the scopes, it is artificial transient object enumerating scope variables as its properties." },
- { "name": "name", "type": "string", "optional": true },
- { "name": "startLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope starts" },
- { "name": "endLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope ends" }
- ],
- "description": "Scope description."
- },
- {
- "id": "SearchMatch",
- "type": "object",
- "description": "Search match for resource.",
- "properties": [
- { "name": "lineNumber", "type": "number", "description": "Line number in resource content." },
- { "name": "lineContent", "type": "string", "description": "Line with match content." }
- ]
- },
- {
- "id": "BreakLocation",
- "type": "object",
- "properties": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Script identifier as reported in the <code>Debugger.scriptParsed</code>." },
- { "name": "lineNumber", "type": "integer", "description": "Line number in the script (0-based)." },
- { "name": "columnNumber", "type": "integer", "optional": true, "description": "Column number in the script (0-based)." },
- { "name": "type", "type": "string", "enum": [ "debuggerStatement", "call", "return" ], "optional": true }
- ]
- }
- ],
- "commands": [
- {
- "name": "enable",
- "returns": [
- { "name": "debuggerId", "$ref": "Runtime.UniqueDebuggerId", "experimental": true, "description": "Unique identifier of the debugger." }
- ],
- "description": "Enables debugger for the given page. Clients should not assume that the debugging has been enabled until the result for this command is received."
- },
- {
- "name": "disable",
- "description": "Disables debugger for given page."
- },
- {
- "name": "setBreakpointsActive",
- "parameters": [
- { "name": "active", "type": "boolean", "description": "New value for breakpoints active state." }
- ],
- "description": "Activates / deactivates all breakpoints on the page."
- },
- {
- "name": "setSkipAllPauses",
- "parameters": [
- { "name": "skip", "type": "boolean", "description": "New value for skip pauses state." }
- ],
- "description": "Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc)."
- },
- {
- "name": "setBreakpointByUrl",
- "parameters": [
- { "name": "lineNumber", "type": "integer", "description": "Line number to set breakpoint at." },
- { "name": "url", "type": "string", "optional": true, "description": "URL of the resources to set breakpoint on." },
- { "name": "urlRegex", "type": "string", "optional": true, "description": "Regex pattern for the URLs of the resources to set breakpoints on. Either <code>url</code> or <code>urlRegex</code> must be specified." },
- { "name": "scriptHash", "type": "string", "optional": true, "description": "Script hash of the resources to set breakpoint on." },
- { "name": "columnNumber", "type": "integer", "optional": true, "description": "Offset in the line to set breakpoint at." },
- { "name": "condition", "type": "string", "optional": true, "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true." }
- ],
- "returns": [
- { "name": "breakpointId", "$ref": "BreakpointId", "description": "Id of the created breakpoint for further reference." },
- { "name": "locations", "type": "array", "items": { "$ref": "Location" }, "description": "List of the locations this breakpoint resolved into upon addition." }
- ],
- "description": "Sets JavaScript breakpoint at given location specified either by URL or URL regex. Once this command is issued, all existing parsed scripts will have breakpoints resolved and returned in <code>locations</code> property. Further matching script parsing will result in subsequent <code>breakpointResolved</code> events issued. This logical breakpoint will survive page reloads."
- },
- {
- "name": "setBreakpoint",
- "parameters": [
- { "name": "location", "$ref": "Location", "description": "Location to set breakpoint in." },
- { "name": "condition", "type": "string", "optional": true, "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true." }
- ],
- "returns": [
- { "name": "breakpointId", "$ref": "BreakpointId", "description": "Id of the created breakpoint for further reference." },
- { "name": "actualLocation", "$ref": "Location", "description": "Location this breakpoint resolved into." }
- ],
- "description": "Sets JavaScript breakpoint at a given location."
- },
- {
- "name": "removeBreakpoint",
- "parameters": [
- { "name": "breakpointId", "$ref": "BreakpointId" }
- ],
- "description": "Removes JavaScript breakpoint."
- },
- {
- "name": "getPossibleBreakpoints",
- "parameters": [
- { "name": "start", "$ref": "Location", "description": "Start of range to search possible breakpoint locations in." },
- { "name": "end", "$ref": "Location", "optional": true, "description": "End of range to search possible breakpoint locations in (excluding). When not specified, end of scripts is used as end of range." },
- { "name": "restrictToFunction", "type": "boolean", "optional": true, "description": "Only consider locations which are in the same (non-nested) function as start." }
- ],
- "returns": [
- { "name": "locations", "type": "array", "items": { "$ref": "BreakLocation" }, "description": "List of the possible breakpoint locations." }
- ],
- "description": "Returns possible locations for breakpoint. scriptId in start and end range locations should be the same."
- },
- {
- "name": "continueToLocation",
- "parameters": [
- { "name": "location", "$ref": "Location", "description": "Location to continue to." },
- { "name": "targetCallFrames", "type": "string", "enum": ["any", "current"], "optional": true }
- ],
- "description": "Continues execution until specific location is reached."
- },
- {
- "name": "pauseOnAsyncCall",
- "parameters": [
- { "name": "parentStackTraceId", "$ref": "Runtime.StackTraceId", "description": "Debugger will pause when async call with given stack trace is started." }
- ],
- "experimental": true
- },
- {
- "name": "stepOver",
- "description": "Steps over the statement."
- },
- {
- "name": "stepInto",
- "parameters": [
- { "name": "breakOnAsyncCall", "type": "boolean", "optional": true, "experimental": true, "description": "Debugger will issue additional Debugger.paused notification if any async task is scheduled before next pause." }
- ],
- "description": "Steps into the function call."
- },
- {
- "name": "stepOut",
- "description": "Steps out of the function call."
- },
- {
- "name": "pause",
- "description": "Stops on the next JavaScript statement."
- },
- {
- "name": "scheduleStepIntoAsync",
-                "description": "This method is deprecated - use Debugger.stepInto with breakOnAsyncCall and Debugger.pauseOnAsyncTask instead. Steps into next scheduled async task if any is scheduled before next pause. Returns success when async task is actually scheduled, returns error if no task was scheduled or another scheduleStepIntoAsync was called.",
- "experimental": true
- },
- {
- "name": "resume",
- "description": "Resumes JavaScript execution."
- },
- {
- "name": "getStackTrace",
- "parameters": [
- { "name": "stackTraceId", "$ref": "Runtime.StackTraceId" }
- ],
- "returns": [
- { "name": "stackTrace", "$ref": "Runtime.StackTrace" }
- ],
- "description": "Returns stack trace with given <code>stackTraceId</code>.",
- "experimental": true
- },
- {
- "name": "searchInContent",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to search in." },
- { "name": "query", "type": "string", "description": "String to search for." },
- { "name": "caseSensitive", "type": "boolean", "optional": true, "description": "If true, search is case sensitive." },
- { "name": "isRegex", "type": "boolean", "optional": true, "description": "If true, treats string parameter as regex." }
- ],
- "returns": [
- { "name": "result", "type": "array", "items": { "$ref": "SearchMatch" }, "description": "List of search matches." }
- ],
- "description": "Searches for given string in script content."
- },
- {
- "name": "setScriptSource",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to edit." },
- { "name": "scriptSource", "type": "string", "description": "New content of the script." },
-                    { "name": "dryRun", "type": "boolean", "optional": true, "description": "If true the change will not actually be applied. Dry run may be used to get result description without actually modifying the code." }
- ],
- "returns": [
- { "name": "callFrames", "type": "array", "optional": true, "items": { "$ref": "CallFrame" }, "description": "New stack trace in case editing has happened while VM was stopped." },
- { "name": "stackChanged", "type": "boolean", "optional": true, "description": "Whether current call stack was modified after applying the changes." },
- { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
- { "name": "asyncStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Async stack trace, if any." },
- { "name": "exceptionDetails", "optional": true, "$ref": "Runtime.ExceptionDetails", "description": "Exception details if any." }
- ],
- "description": "Edits JavaScript source live."
- },
- {
- "name": "restartFrame",
- "parameters": [
- { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier to evaluate on." }
- ],
- "returns": [
- { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "New stack trace." },
- { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
- { "name": "asyncStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Async stack trace, if any." }
- ],
- "description": "Restarts particular call frame from the beginning."
- },
- {
- "name": "getScriptSource",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to get source for." }
- ],
- "returns": [
- { "name": "scriptSource", "type": "string", "description": "Script source." }
- ],
- "description": "Returns source for the script with given id."
- },
- {
- "name": "setPauseOnExceptions",
- "parameters": [
- { "name": "state", "type": "string", "enum": ["none", "uncaught", "all"], "description": "Pause on exceptions mode." }
- ],
- "description": "Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions or no exceptions. Initial pause on exceptions state is <code>none</code>."
- },
- {
- "name": "evaluateOnCallFrame",
- "parameters": [
- { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier to evaluate on." },
- { "name": "expression", "type": "string", "description": "Expression to evaluate." },
- { "name": "objectGroup", "type": "string", "optional": true, "description": "String object group name to put result into (allows rapid releasing resulting object handles using <code>releaseObjectGroup</code>)." },
- { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Specifies whether command line API should be available to the evaluated expression, defaults to false." },
- { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
- { "name": "throwOnSideEffect", "type": "boolean", "optional": true, "description": "Whether to throw an exception if side effect cannot be ruled out during evaluation." }
- ],
- "returns": [
- { "name": "result", "$ref": "Runtime.RemoteObject", "description": "Object wrapper for the evaluation result." },
- { "name": "exceptionDetails", "$ref": "Runtime.ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Evaluates expression on a given call frame."
- },
- {
- "name": "setVariableValue",
- "parameters": [
- { "name": "scopeNumber", "type": "integer", "description": "0-based number of scope as was listed in scope chain. Only 'local', 'closure' and 'catch' scope types are allowed. Other scopes could be manipulated manually." },
- { "name": "variableName", "type": "string", "description": "Variable name." },
- { "name": "newValue", "$ref": "Runtime.CallArgument", "description": "New variable value." },
- { "name": "callFrameId", "$ref": "CallFrameId", "description": "Id of callframe that holds variable." }
- ],
- "description": "Changes value of variable in a callframe. Object-based scopes are not supported and must be mutated manually."
- },
- {
- "name": "setReturnValue",
- "parameters": [
- { "name": "newValue", "$ref": "Runtime.CallArgument", "description": "New return value." }
- ],
- "experimental": true,
- "description": "Changes return value in top frame. Available only at return break position."
- },
- {
- "name": "setAsyncCallStackDepth",
- "parameters": [
- { "name": "maxDepth", "type": "integer", "description": "Maximum depth of async call stacks. Setting to <code>0</code> will effectively disable collecting async call stacks (default)." }
- ],
- "description": "Enables or disables async call stacks tracking."
- },
- {
- "name": "setBlackboxPatterns",
- "parameters": [
- { "name": "patterns", "type": "array", "items": { "type": "string" }, "description": "Array of regexps that will be used to check script url for blackbox state." }
- ],
- "experimental": true,
- "description": "Replace previous blackbox patterns with passed ones. Forces backend to skip stepping/pausing in scripts with url matching one of the patterns. VM will try to leave blackboxed script by performing 'step in' several times, finally resorting to 'step out' if unsuccessful."
- },
- {
- "name": "setBlackboxedRanges",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script." },
- { "name": "positions", "type": "array", "items": { "$ref": "ScriptPosition" } }
- ],
- "experimental": true,
-                "description": "Makes backend skip steps in the script in blackboxed ranges. VM will try to leave blacklisted scripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful. Positions array contains positions where blackbox state is changed. First interval isn't blackboxed. Array should be sorted."
- }
- ],
- "events": [
- {
- "name": "scriptParsed",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Identifier of the script parsed." },
- { "name": "url", "type": "string", "description": "URL or name of the script parsed (if any)." },
- { "name": "startLine", "type": "integer", "description": "Line offset of the script within the resource with given URL (for script tags)." },
- { "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
- { "name": "endLine", "type": "integer", "description": "Last line of the script." },
- { "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
- { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context." },
- { "name": "hash", "type": "string", "description": "Content hash of the script."},
- { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
- { "name": "isLiveEdit", "type": "boolean", "optional": true, "description": "True, if this script is generated as a result of the live edit operation.", "experimental": true },
- { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
- { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL." },
- { "name": "isModule", "type": "boolean", "optional": true, "description": "True, if this script is ES6 module." },
- { "name": "length", "type": "integer", "optional": true, "description": "This script length." },
- { "name": "stackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "JavaScript top stack frame of where the script parsed event was triggered if available.", "experimental": true }
- ],
- "description": "Fired when virtual machine parses script. This event is also fired for all known and uncollected scripts upon enabling debugger."
- },
- {
- "name": "scriptFailedToParse",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Identifier of the script parsed." },
- { "name": "url", "type": "string", "description": "URL or name of the script parsed (if any)." },
- { "name": "startLine", "type": "integer", "description": "Line offset of the script within the resource with given URL (for script tags)." },
- { "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
- { "name": "endLine", "type": "integer", "description": "Last line of the script." },
- { "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
- { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context." },
- { "name": "hash", "type": "string", "description": "Content hash of the script."},
- { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
- { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
- { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL." },
- { "name": "isModule", "type": "boolean", "optional": true, "description": "True, if this script is ES6 module." },
- { "name": "length", "type": "integer", "optional": true, "description": "This script length." },
- { "name": "stackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "JavaScript top stack frame of where the script parsed event was triggered if available.", "experimental": true }
- ],
- "description": "Fired when virtual machine fails to parse the script."
- },
- {
- "name": "breakpointResolved",
- "parameters": [
- { "name": "breakpointId", "$ref": "BreakpointId", "description": "Breakpoint unique identifier." },
- { "name": "location", "$ref": "Location", "description": "Actual breakpoint location." }
- ],
- "description": "Fired when breakpoint is resolved to an actual script and location."
- },
- {
- "name": "paused",
- "parameters": [
- { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "Call stack the virtual machine stopped on." },
- { "name": "reason", "type": "string", "enum": [ "XHR", "DOM", "EventListener", "exception", "assert", "debugCommand", "promiseRejection", "OOM", "other", "ambiguous" ], "description": "Pause reason." },
- { "name": "data", "type": "object", "optional": true, "description": "Object containing break-specific auxiliary properties." },
- { "name": "hitBreakpoints", "type": "array", "optional": true, "items": { "type": "string" }, "description": "Hit breakpoints IDs" },
- { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
- { "name": "asyncStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Async stack trace, if any." },
-                    { "name": "asyncCallStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Just scheduled async call will have this stack trace as parent stack during async execution. This field is available only after <code>Debugger.stepInto</code> call with <code>breakOnAsyncCall</code> flag." }
- ],
- "description": "Fired when the virtual machine stopped on breakpoint or exception or any other stop criteria."
- },
- {
- "name": "resumed",
- "description": "Fired when the virtual machine resumed execution."
- }
- ]
+ "version": {
+ "major": "1",
+ "minor": "3"
},
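
Similarly, a hedged sketch of the Debugger flow defined above: enable the domain, set a breakpoint by URL, and resume from the paused notification. It assumes Node's built-in inspector module (and its TypeScript typings); the URL regex and line number are placeholders:

import * as inspector from 'inspector';

const session = new inspector.Session();
session.connect();

// Protocol notifications surface as events on the session, e.g. Debugger.paused.
session.on('Debugger.paused', ({ params }) => {
  const top = params.callFrames[0];
  console.log(`paused (${params.reason}) in ${top.functionName} at line ${top.location.lineNumber}`);
  session.post('Debugger.resume'); // let execution continue
});

session.post('Debugger.enable', (err, res) => {
  if (err) throw err;
  console.log('debugger attached, id:', res.debuggerId);
  // Breakpoints set by URL survive re-parses; each match is reported via breakpointResolved.
  session.post('Debugger.setBreakpointByUrl', {
    lineNumber: 10,                 // placeholder line
    urlRegex: '.*example\\.js'      // placeholder pattern
  }, (err2, bp) => {
    if (err2) throw err2;
    console.log('breakpoint', bp.breakpointId, 'resolved at', bp.locations.length, 'location(s)');
  });
});
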
- {
- "domain": "Console",
- "description": "This domain is deprecated - use Runtime or Log instead.",
- "dependencies": ["Runtime"],
- "deprecated": true,
- "types": [
- {
- "id": "ConsoleMessage",
- "type": "object",
- "description": "Console message.",
- "properties": [
- { "name": "source", "type": "string", "enum": ["xml", "javascript", "network", "console-api", "storage", "appcache", "rendering", "security", "other", "deprecation", "worker"], "description": "Message source." },
- { "name": "level", "type": "string", "enum": ["log", "warning", "error", "debug", "info"], "description": "Message severity." },
- { "name": "text", "type": "string", "description": "Message text." },
- { "name": "url", "type": "string", "optional": true, "description": "URL of the message origin." },
- { "name": "line", "type": "integer", "optional": true, "description": "Line number in the resource that generated this message (1-based)." },
- { "name": "column", "type": "integer", "optional": true, "description": "Column number in the resource that generated this message (1-based)." }
- ]
- }
- ],
- "commands": [
- {
- "name": "enable",
- "description": "Enables console domain, sends the messages collected so far to the client by means of the <code>messageAdded</code> notification."
- },
- {
- "name": "disable",
- "description": "Disables console domain, prevents further console messages from being reported to the client."
- },
- {
- "name": "clearMessages",
- "description": "Does nothing."
- }
- ],
- "events": [
- {
- "name": "messageAdded",
- "parameters": [
- { "name": "message", "$ref": "ConsoleMessage", "description": "Console message that has been added." }
- ],
- "description": "Issued when new console message is added."
- }
- ]
- },
- {
- "domain": "Profiler",
- "dependencies": ["Runtime", "Debugger"],
- "types": [
- {
- "id": "ProfileNode",
- "type": "object",
- "description": "Profile node. Holds callsite information, execution statistics and child nodes.",
- "properties": [
- { "name": "id", "type": "integer", "description": "Unique id of the node." },
- { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
- { "name": "hitCount", "type": "integer", "optional": true, "description": "Number of samples where this node was on top of the call stack." },
- { "name": "children", "type": "array", "items": { "type": "integer" }, "optional": true, "description": "Child node ids." },
-                    { "name": "deoptReason", "type": "string", "optional": true, "description": "The reason the function was not optimized. The function may be deoptimized or marked as don't optimize."},
- { "name": "positionTicks", "type": "array", "items": { "$ref": "PositionTickInfo" }, "optional": true, "description": "An array of source position ticks." }
- ]
- },
- {
- "id": "Profile",
- "type": "object",
- "description": "Profile.",
- "properties": [
- { "name": "nodes", "type": "array", "items": { "$ref": "ProfileNode" }, "description": "The list of profile nodes. First item is the root node." },
- { "name": "startTime", "type": "number", "description": "Profiling start timestamp in microseconds." },
- { "name": "endTime", "type": "number", "description": "Profiling end timestamp in microseconds." },
- { "name": "samples", "optional": true, "type": "array", "items": { "type": "integer" }, "description": "Ids of samples top nodes." },
- { "name": "timeDeltas", "optional": true, "type": "array", "items": { "type": "integer" }, "description": "Time intervals between adjacent samples in microseconds. The first delta is relative to the profile startTime." }
- ]
- },
- {
- "id": "PositionTickInfo",
- "type": "object",
- "description": "Specifies a number of samples attributed to a certain source position.",
- "properties": [
- { "name": "line", "type": "integer", "description": "Source line number (1-based)." },
- { "name": "ticks", "type": "integer", "description": "Number of samples attributed to the source line." }
- ]
- },
- { "id": "CoverageRange",
- "type": "object",
- "description": "Coverage data for a source range.",
- "properties": [
- { "name": "startOffset", "type": "integer", "description": "JavaScript script source offset for the range start." },
- { "name": "endOffset", "type": "integer", "description": "JavaScript script source offset for the range end." },
- { "name": "count", "type": "integer", "description": "Collected execution count of the source range." }
- ]
- },
- { "id": "FunctionCoverage",
- "type": "object",
- "description": "Coverage data for a JavaScript function.",
- "properties": [
- { "name": "functionName", "type": "string", "description": "JavaScript function name." },
- { "name": "ranges", "type": "array", "items": { "$ref": "CoverageRange" }, "description": "Source ranges inside the function with coverage data." },
- { "name": "isBlockCoverage", "type": "boolean", "description": "Whether coverage data for this function has block granularity." }
- ]
- },
- {
- "id": "ScriptCoverage",
- "type": "object",
- "description": "Coverage data for a JavaScript script.",
- "properties": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "JavaScript script id." },
- { "name": "url", "type": "string", "description": "JavaScript script name or url." },
-                    { "name": "functions", "type": "array", "items": { "$ref": "FunctionCoverage" }, "description": "Functions contained in the script that have coverage data." }
- ]
- },
- { "id": "TypeObject",
- "type": "object",
- "description": "Describes a type collected during runtime.",
- "properties": [
- { "name": "name", "type": "string", "description": "Name of a type collected with type profiling." }
- ],
- "experimental": true
- },
- { "id": "TypeProfileEntry",
- "type": "object",
- "description": "Source offset and types for a parameter or return value.",
- "properties": [
- { "name": "offset", "type": "integer", "description": "Source offset of the parameter or end of function for return values." },
- { "name": "types", "type": "array", "items": {"$ref": "TypeObject"}, "description": "The types for this parameter or return value."}
- ],
- "experimental": true
- },
- {
- "id": "ScriptTypeProfile",
- "type": "object",
- "description": "Type profile data collected during runtime for a JavaScript script.",
- "properties": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "JavaScript script id." },
- { "name": "url", "type": "string", "description": "JavaScript script name or url." },
- { "name": "entries", "type": "array", "items": { "$ref": "TypeProfileEntry" }, "description": "Type profile entries for parameters and return values of the functions in the script." }
- ],
- "experimental": true
- }
- ],
- "commands": [
- {
- "name": "enable"
- },
- {
- "name": "disable"
- },
- {
- "name": "setSamplingInterval",
- "parameters": [
- { "name": "interval", "type": "integer", "description": "New sampling interval in microseconds." }
- ],
- "description": "Changes CPU profiler sampling interval. Must be called before CPU profiles recording started."
- },
- {
- "name": "start"
- },
- {
- "name": "stop",
- "returns": [
- { "name": "profile", "$ref": "Profile", "description": "Recorded profile." }
- ]
- },
- {
- "name": "startPreciseCoverage",
- "parameters": [
- { "name": "callCount", "type": "boolean", "optional": true, "description": "Collect accurate call counts beyond simple 'covered' or 'not covered'." },
- { "name": "detailed", "type": "boolean", "optional": true, "description": "Collect block-based coverage." }
- ],
- "description": "Enable precise code coverage. Coverage data for JavaScript executed before enabling precise code coverage may be incomplete. Enabling prevents running optimized code and resets execution counters."
- },
- {
- "name": "stopPreciseCoverage",
- "description": "Disable precise code coverage. Disabling releases unnecessary execution count records and allows executing optimized code."
- },
- {
- "name": "takePreciseCoverage",
- "returns": [
- { "name": "result", "type": "array", "items": { "$ref": "ScriptCoverage" }, "description": "Coverage data for the current isolate." }
- ],
- "description": "Collect coverage data for the current isolate, and resets execution counters. Precise code coverage needs to have started."
- },
- {
- "name": "getBestEffortCoverage",
- "returns": [
- { "name": "result", "type": "array", "items": { "$ref": "ScriptCoverage" }, "description": "Coverage data for the current isolate." }
- ],
- "description": "Collect coverage data for the current isolate. The coverage data may be incomplete due to garbage collection."
- },
- {
- "name": "startTypeProfile",
- "description": "Enable type profile.",
- "experimental": true
- },
- {
- "name": "stopTypeProfile",
- "description": "Disable type profile. Disabling releases type profile data collected so far.",
- "experimental": true
- },
- {
- "name": "takeTypeProfile",
- "returns": [
- { "name": "result", "type": "array", "items": { "$ref": "ScriptTypeProfile" }, "description": "Type profile for all scripts since startTypeProfile() was turned on." }
- ],
- "description": "Collect type profile.",
- "experimental": true
- }
- ],
- "events": [
- {
- "name": "consoleProfileStarted",
- "parameters": [
- { "name": "id", "type": "string" },
- { "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profile()." },
- { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as an argument to console.profile()." }
- ],
- "description": "Sent when new profile recording is started using console.profile() call."
- },
- {
- "name": "consoleProfileFinished",
- "parameters": [
- { "name": "id", "type": "string" },
- { "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profileEnd()." },
- { "name": "profile", "$ref": "Profile" },
- { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as an argument to console.profile()." }
- ]
- }
- ]
- },
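
A small sketch, under the same assumptions (Node's inspector module and typings), of how the Profiler coverage commands above are typically chained: start precise coverage, run the code of interest, take the data, then stop. The collatzStep workload is a placeholder:

import * as inspector from 'inspector';

const session = new inspector.Session();
session.connect();

// Placeholder workload whose coverage we want to observe.
function collatzStep(n: number): number {
  return n % 2 === 0 ? n / 2 : 3 * n + 1;
}

session.post('Profiler.enable', () => {
  // Coverage must be started before the code of interest runs; callCount/detailed
  // request exact execution counts and block-granularity ranges.
  session.post('Profiler.startPreciseCoverage', { callCount: true, detailed: true }, () => {
    collatzStep(7);
    session.post('Profiler.takePreciseCoverage', (err, res) => {
      if (err) throw err;
      for (const script of res.result) {
        console.log(script.url, '-', script.functions.length, 'functions with coverage data');
      }
      // Stopping releases the counters and lets optimized code run again.
      session.post('Profiler.stopPreciseCoverage');
    });
  });
});
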
- {
- "domain": "HeapProfiler",
- "dependencies": ["Runtime"],
- "experimental": true,
- "types": [
- {
- "id": "HeapSnapshotObjectId",
- "type": "string",
- "description": "Heap snapshot object id."
- },
- {
- "id": "SamplingHeapProfileNode",
- "type": "object",
- "description": "Sampling Heap Profile node. Holds callsite information, allocation statistics and child nodes.",
- "properties": [
- { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
- { "name": "selfSize", "type": "number", "description": "Allocations size in bytes for the node excluding children." },
- { "name": "children", "type": "array", "items": { "$ref": "SamplingHeapProfileNode" }, "description": "Child nodes." }
- ]
- },
- {
- "id": "SamplingHeapProfile",
- "type": "object",
- "description": "Profile.",
- "properties": [
- { "name": "head", "$ref": "SamplingHeapProfileNode" }
- ]
- }
- ],
- "commands": [
- {
- "name": "enable"
- },
- {
- "name": "disable"
- },
- {
- "name": "startTrackingHeapObjects",
- "parameters": [
- { "name": "trackAllocations", "type": "boolean", "optional": true }
- ]
- },
- {
- "name": "stopTrackingHeapObjects",
- "parameters": [
- { "name": "reportProgress", "type": "boolean", "optional": true, "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken when the tracking is stopped." }
- ]
- },
- {
- "name": "takeHeapSnapshot",
- "parameters": [
- { "name": "reportProgress", "type": "boolean", "optional": true, "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken." }
- ]
- },
- {
- "name": "collectGarbage"
- },
- {
- "name": "getObjectByHeapObjectId",
- "parameters": [
- { "name": "objectId", "$ref": "HeapSnapshotObjectId" },
- { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." }
- ],
- "returns": [
- { "name": "result", "$ref": "Runtime.RemoteObject", "description": "Evaluation result." }
- ]
- },
- {
- "name": "addInspectedHeapObject",
- "parameters": [
- { "name": "heapObjectId", "$ref": "HeapSnapshotObjectId", "description": "Heap snapshot object id to be accessible by means of $x command line API." }
- ],
-                "description": "Enables console to refer to the node with given id via $x (see Command Line API for more details on $x functions)."
- },
- {
- "name": "getHeapObjectId",
- "parameters": [
- { "name": "objectId", "$ref": "Runtime.RemoteObjectId", "description": "Identifier of the object to get heap object id for." }
- ],
- "returns": [
- { "name": "heapSnapshotObjectId", "$ref": "HeapSnapshotObjectId", "description": "Id of the heap snapshot object corresponding to the passed remote object id." }
- ]
- },
- {
- "name": "startSampling",
- "parameters": [
- { "name": "samplingInterval", "type": "number", "optional": true, "description": "Average sample interval in bytes. Poisson distribution is used for the intervals. The default value is 32768 bytes." }
- ]
- },
- {
- "name": "stopSampling",
- "returns": [
- { "name": "profile", "$ref": "SamplingHeapProfile", "description": "Recorded sampling heap profile." }
- ]
- },
- {
- "name": "getSamplingProfile",
- "returns": [
- { "name": "profile", "$ref": "SamplingHeapProfile", "description": "Return the sampling profile being collected." }
- ]
- }
- ],
- "events": [
- {
- "name": "addHeapSnapshotChunk",
- "parameters": [
- { "name": "chunk", "type": "string" }
- ]
- },
- {
- "name": "resetProfiles"
- },
- {
- "name": "reportHeapSnapshotProgress",
- "parameters": [
- { "name": "done", "type": "integer" },
- { "name": "total", "type": "integer" },
- { "name": "finished", "type": "boolean", "optional": true }
- ]
- },
- {
- "name": "lastSeenObjectId",
-                "description": "If heap objects tracking has been started then backend regularly sends a current value for last seen object id and corresponding timestamp. If there were changes in the heap since last event then one or more heapStatsUpdate events will be sent before a new lastSeenObjectId event.",
- "parameters": [
- { "name": "lastSeenObjectId", "type": "integer" },
- { "name": "timestamp", "type": "number" }
- ]
- },
- {
- "name": "heapStatsUpdate",
-                "description": "If heap objects tracking has been started then backend may send an update for one or more fragments.",
- "parameters": [
- { "name": "statsUpdate", "type": "array", "items": { "type": "integer" }, "description": "An array of triplets. Each triplet describes a fragment. The first integer is the fragment index, the second integer is a total count of objects for the fragment, the third integer is a total size of the objects for the fragment."}
- ]
- }
- ]
- }]
-}
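
Finally, a brief sketch of the HeapProfiler sampling commands defined above, under the same assumptions; the 32768-byte interval is simply the documented default spelled out for illustration, and the allocation loop is a placeholder workload:

import * as inspector from 'inspector';

const session = new inspector.Session();
session.connect();

session.post('HeapProfiler.enable', () => {
  // Poisson-distributed sampling of allocations; 32768 bytes is the documented default interval.
  session.post('HeapProfiler.startSampling', { samplingInterval: 32768 }, () => {
    const retained: string[] = [];
    for (let i = 0; i < 100000; i++) retained.push('allocation #' + i); // placeholder workload
    session.post('HeapProfiler.stopSampling', (err, res) => {
      if (err) throw err;
      // The profile is a tree of SamplingHeapProfileNode objects rooted at `head`.
      console.log('top-level callers sampled:', res.profile.head.children.length);
      console.log('strings kept alive:', retained.length);
    });
  });
});
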
+ "domains": [
+ {
+ "domain": "Console",
+ "description": "This domain is deprecated - use Runtime or Log instead.",
+ "deprecated": true,
+ "dependencies": [
+ "Runtime"
+ ],
+ "types": [
+ {
+ "id": "ConsoleMessage",
+ "description": "Console message.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "source",
+ "description": "Message source.",
+ "type": "string",
+ "enum": [
+ "xml",
+ "javascript",
+ "network",
+ "console-api",
+ "storage",
+ "appcache",
+ "rendering",
+ "security",
+ "other",
+ "deprecation",
+ "worker"
+ ]
+ },
+ {
+ "name": "level",
+ "description": "Message severity.",
+ "type": "string",
+ "enum": [
+ "log",
+ "warning",
+ "error",
+ "debug",
+ "info"
+ ]
+ },
+ {
+ "name": "text",
+ "description": "Message text.",
+ "type": "string"
+ },
+ {
+ "name": "url",
+ "description": "URL of the message origin.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "line",
+ "description": "Line number in the resource that generated this message (1-based).",
+ "optional": true,
+ "type": "integer"
+ },
+ {
+ "name": "column",
+ "description": "Column number in the resource that generated this message (1-based).",
+ "optional": true,
+ "type": "integer"
+ }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "clearMessages",
+ "description": "Does nothing."
+ },
+ {
+ "name": "disable",
+ "description": "Disables console domain, prevents further console messages from being reported to the client."
+ },
+ {
+ "name": "enable",
+ "description": "Enables console domain, sends the messages collected so far to the client by means of the\n`messageAdded` notification."
+ }
+ ],
+ "events": [
+ {
+ "name": "messageAdded",
+ "description": "Issued when new console message is added.",
+ "parameters": [
+ {
+ "name": "message",
+ "description": "Console message that has been added.",
+ "$ref": "ConsoleMessage"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "domain": "Debugger",
+ "description": "Debugger domain exposes JavaScript debugging capabilities. It allows setting and removing\nbreakpoints, stepping through execution, exploring stack traces, etc.",
+ "dependencies": [
+ "Runtime"
+ ],
+ "types": [
+ {
+ "id": "BreakpointId",
+ "description": "Breakpoint identifier.",
+ "type": "string"
+ },
+ {
+ "id": "CallFrameId",
+ "description": "Call frame identifier.",
+ "type": "string"
+ },
+ {
+ "id": "Location",
+ "description": "Location in the source code.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "scriptId",
+ "description": "Script identifier as reported in the `Debugger.scriptParsed`.",
+ "$ref": "Runtime.ScriptId"
+ },
+ {
+ "name": "lineNumber",
+ "description": "Line number in the script (0-based).",
+ "type": "integer"
+ },
+ {
+ "name": "columnNumber",
+ "description": "Column number in the script (0-based).",
+ "optional": true,
+ "type": "integer"
+ }
+ ]
+ },
+ {
+ "id": "ScriptPosition",
+ "description": "Location in the source code.",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ {
+ "name": "lineNumber",
+ "type": "integer"
+ },
+ {
+ "name": "columnNumber",
+ "type": "integer"
+ }
+ ]
+ },
+ {
+ "id": "CallFrame",
+ "description": "JavaScript call frame. Array of call frames form the call stack.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "callFrameId",
+ "description": "Call frame identifier. This identifier is only valid while the virtual machine is paused.",
+ "$ref": "CallFrameId"
+ },
+ {
+ "name": "functionName",
+ "description": "Name of the JavaScript function called on this call frame.",
+ "type": "string"
+ },
+ {
+ "name": "functionLocation",
+ "description": "Location in the source code.",
+ "optional": true,
+ "$ref": "Location"
+ },
+ {
+ "name": "location",
+ "description": "Location in the source code.",
+ "$ref": "Location"
+ },
+ {
+ "name": "url",
+ "description": "JavaScript script name or url.",
+ "type": "string"
+ },
+ {
+ "name": "scopeChain",
+ "description": "Scope chain for this call frame.",
+ "type": "array",
+ "items": {
+ "$ref": "Scope"
+ }
+ },
+ {
+ "name": "this",
+ "description": "`this` object for this call frame.",
+ "$ref": "Runtime.RemoteObject"
+ },
+ {
+ "name": "returnValue",
+ "description": "The value being returned, if the function is at return point.",
+ "optional": true,
+ "$ref": "Runtime.RemoteObject"
+ }
+ ]
+ },
+ {
+ "id": "Scope",
+ "description": "Scope description.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "type",
+ "description": "Scope type.",
+ "type": "string",
+ "enum": [
+ "global",
+ "local",
+ "with",
+ "closure",
+ "catch",
+ "block",
+ "script",
+ "eval",
+ "module"
+ ]
+ },
+ {
+ "name": "object",
+ "description": "Object representing the scope. For `global` and `with` scopes it represents the actual\nobject; for the rest of the scopes, it is artificial transient object enumerating scope\nvariables as its properties.",
+ "$ref": "Runtime.RemoteObject"
+ },
+ {
+ "name": "name",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "startLocation",
+ "description": "Location in the source code where scope starts",
+ "optional": true,
+ "$ref": "Location"
+ },
+ {
+ "name": "endLocation",
+ "description": "Location in the source code where scope ends",
+ "optional": true,
+ "$ref": "Location"
+ }
+ ]
+ },
+ {
+ "id": "SearchMatch",
+ "description": "Search match for resource.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "lineNumber",
+ "description": "Line number in resource content.",
+ "type": "number"
+ },
+ {
+ "name": "lineContent",
+ "description": "Line with match content.",
+ "type": "string"
+ }
+ ]
+ },
+ {
+ "id": "BreakLocation",
+ "type": "object",
+ "properties": [
+ {
+ "name": "scriptId",
+ "description": "Script identifier as reported in the `Debugger.scriptParsed`.",
+ "$ref": "Runtime.ScriptId"
+ },
+ {
+ "name": "lineNumber",
+ "description": "Line number in the script (0-based).",
+ "type": "integer"
+ },
+ {
+ "name": "columnNumber",
+ "description": "Column number in the script (0-based).",
+ "optional": true,
+ "type": "integer"
+ },
+ {
+ "name": "type",
+ "optional": true,
+ "type": "string",
+ "enum": [
+ "debuggerStatement",
+ "call",
+ "return"
+ ]
+ }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "continueToLocation",
+ "description": "Continues execution until specific location is reached.",
+ "parameters": [
+ {
+ "name": "location",
+ "description": "Location to continue to.",
+ "$ref": "Location"
+ },
+ {
+ "name": "targetCallFrames",
+ "optional": true,
+ "type": "string",
+ "enum": [
+ "any",
+ "current"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "disable",
+ "description": "Disables debugger for given page."
+ },
+ {
+ "name": "enable",
+ "description": "Enables debugger for the given page. Clients should not assume that the debugging has been\nenabled until the result for this command is received.",
+ "returns": [
+ {
+ "name": "debuggerId",
+ "description": "Unique identifier of the debugger.",
+ "experimental": true,
+ "$ref": "Runtime.UniqueDebuggerId"
+ }
+ ]
+ },
+ {
+ "name": "evaluateOnCallFrame",
+ "description": "Evaluates expression on a given call frame.",
+ "parameters": [
+ {
+ "name": "callFrameId",
+ "description": "Call frame identifier to evaluate on.",
+ "$ref": "CallFrameId"
+ },
+ {
+ "name": "expression",
+ "description": "Expression to evaluate.",
+ "type": "string"
+ },
+ {
+ "name": "objectGroup",
+ "description": "String object group name to put result into (allows rapid releasing resulting object handles\nusing `releaseObjectGroup`).",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "includeCommandLineAPI",
+ "description": "Specifies whether command line API should be available to the evaluated expression, defaults\nto false.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "silent",
+ "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause\nexecution. Overrides `setPauseOnException` state.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "returnByValue",
+ "description": "Whether the result is expected to be a JSON object that should be sent by value.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "generatePreview",
+ "description": "Whether preview should be generated for the result.",
+ "experimental": true,
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "throwOnSideEffect",
+ "description": "Whether to throw an exception if side effect cannot be ruled out during evaluation.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ],
+ "returns": [
+ {
+ "name": "result",
+ "description": "Object wrapper for the evaluation result.",
+ "$ref": "Runtime.RemoteObject"
+ },
+ {
+ "name": "exceptionDetails",
+ "description": "Exception details.",
+ "optional": true,
+ "$ref": "Runtime.ExceptionDetails"
+ }
+ ]
+ },
+ {
+ "name": "getPossibleBreakpoints",
+ "description": "Returns possible locations for breakpoint. scriptId in start and end range locations should be\nthe same.",
+ "parameters": [
+ {
+ "name": "start",
+ "description": "Start of range to search possible breakpoint locations in.",
+ "$ref": "Location"
+ },
+ {
+ "name": "end",
+ "description": "End of range to search possible breakpoint locations in (excluding). When not specified, end\nof scripts is used as end of range.",
+ "optional": true,
+ "$ref": "Location"
+ },
+ {
+ "name": "restrictToFunction",
+ "description": "Only consider locations which are in the same (non-nested) function as start.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ],
+ "returns": [
+ {
+ "name": "locations",
+ "description": "List of the possible breakpoint locations.",
+ "type": "array",
+ "items": {
+ "$ref": "BreakLocation"
+ }
+ }
+ ]
+ },
+ {
+ "name": "getScriptSource",
+ "description": "Returns source for the script with given id.",
+ "parameters": [
+ {
+ "name": "scriptId",
+ "description": "Id of the script to get source for.",
+ "$ref": "Runtime.ScriptId"
+ }
+ ],
+ "returns": [
+ {
+ "name": "scriptSource",
+ "description": "Script source.",
+ "type": "string"
+ }
+ ]
+ },
+ {
+ "name": "getStackTrace",
+ "description": "Returns stack trace with given `stackTraceId`.",
+ "experimental": true,
+ "parameters": [
+ {
+ "name": "stackTraceId",
+ "$ref": "Runtime.StackTraceId"
+ }
+ ],
+ "returns": [
+ {
+ "name": "stackTrace",
+ "$ref": "Runtime.StackTrace"
+ }
+ ]
+ },
+ {
+ "name": "pause",
+ "description": "Stops on the next JavaScript statement."
+ },
+ {
+ "name": "pauseOnAsyncCall",
+ "experimental": true,
+ "parameters": [
+ {
+ "name": "parentStackTraceId",
+ "description": "Debugger will pause when async call with given stack trace is started.",
+ "$ref": "Runtime.StackTraceId"
+ }
+ ]
+ },
+ {
+ "name": "removeBreakpoint",
+ "description": "Removes JavaScript breakpoint.",
+ "parameters": [
+ {
+ "name": "breakpointId",
+ "$ref": "BreakpointId"
+ }
+ ]
+ },
+ {
+ "name": "restartFrame",
+ "description": "Restarts particular call frame from the beginning.",
+ "parameters": [
+ {
+ "name": "callFrameId",
+ "description": "Call frame identifier to evaluate on.",
+ "$ref": "CallFrameId"
+ }
+ ],
+ "returns": [
+ {
+ "name": "callFrames",
+ "description": "New stack trace.",
+ "type": "array",
+ "items": {
+ "$ref": "CallFrame"
+ }
+ },
+ {
+ "name": "asyncStackTrace",
+ "description": "Async stack trace, if any.",
+ "optional": true,
+ "$ref": "Runtime.StackTrace"
+ },
+ {
+ "name": "asyncStackTraceId",
+ "description": "Async stack trace, if any.",
+ "experimental": true,
+ "optional": true,
+ "$ref": "Runtime.StackTraceId"
+ }
+ ]
+ },
+ {
+ "name": "resume",
+ "description": "Resumes JavaScript execution."
+ },
+ {
+ "name": "scheduleStepIntoAsync",
+ "description": "This method is deprecated - use Debugger.stepInto with breakOnAsyncCall and\nDebugger.pauseOnAsyncTask instead. Steps into next scheduled async task if any is scheduled\nbefore next pause. Returns success when async task is actually scheduled, returns error if no\ntask were scheduled or another scheduleStepIntoAsync was called.",
+ "experimental": true
+ },
+ {
+ "name": "searchInContent",
+ "description": "Searches for given string in script content.",
+ "parameters": [
+ {
+ "name": "scriptId",
+ "description": "Id of the script to search in.",
+ "$ref": "Runtime.ScriptId"
+ },
+ {
+ "name": "query",
+ "description": "String to search for.",
+ "type": "string"
+ },
+ {
+ "name": "caseSensitive",
+ "description": "If true, search is case sensitive.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "isRegex",
+ "description": "If true, treats string parameter as regex.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ],
+ "returns": [
+ {
+ "name": "result",
+ "description": "List of search matches.",
+ "type": "array",
+ "items": {
+ "$ref": "SearchMatch"
+ }
+ }
+ ]
+ },
+ {
+ "name": "setAsyncCallStackDepth",
+ "description": "Enables or disables async call stacks tracking.",
+ "parameters": [
+ {
+ "name": "maxDepth",
+ "description": "Maximum depth of async call stacks. Setting to `0` will effectively disable collecting async\ncall stacks (default).",
+ "type": "integer"
+ }
+ ]
+ },
+ {
+ "name": "setBlackboxPatterns",
+ "description": "Replace previous blackbox patterns with passed ones. Forces backend to skip stepping/pausing in\nscripts with url matching one of the patterns. VM will try to leave blackboxed script by\nperforming 'step in' several times, finally resorting to 'step out' if unsuccessful.",
+ "experimental": true,
+ "parameters": [
+ {
+ "name": "patterns",
+ "description": "Array of regexps that will be used to check script url for blackbox state.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ ]
+ },
+ {
+ "name": "setBlackboxedRanges",
+ "description": "Makes backend skip steps in the script in blackboxed ranges. VM will try leave blacklisted\nscripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful.\nPositions array contains positions where blackbox state is changed. First interval isn't\nblackboxed. Array should be sorted.",
+ "experimental": true,
+ "parameters": [
+ {
+ "name": "scriptId",
+ "description": "Id of the script.",
+ "$ref": "Runtime.ScriptId"
+ },
+ {
+ "name": "positions",
+ "type": "array",
+ "items": {
+ "$ref": "ScriptPosition"
+ }
+ }
+ ]
+ },
+ {
+ "name": "setBreakpoint",
+ "description": "Sets JavaScript breakpoint at a given location.",
+ "parameters": [
+ {
+ "name": "location",
+ "description": "Location to set breakpoint in.",
+ "$ref": "Location"
+ },
+ {
+ "name": "condition",
+ "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the\nbreakpoint if this expression evaluates to true.",
+ "optional": true,
+ "type": "string"
+ }
+ ],
+ "returns": [
+ {
+ "name": "breakpointId",
+ "description": "Id of the created breakpoint for further reference.",
+ "$ref": "BreakpointId"
+ },
+ {
+ "name": "actualLocation",
+ "description": "Location this breakpoint resolved into.",
+ "$ref": "Location"
+ }
+ ]
+ },
+ {
+ "name": "setBreakpointByUrl",
+ "description": "Sets JavaScript breakpoint at given location specified either by URL or URL regex. Once this\ncommand is issued, all existing parsed scripts will have breakpoints resolved and returned in\n`locations` property. Further matching script parsing will result in subsequent\n`breakpointResolved` events issued. This logical breakpoint will survive page reloads.",
+ "parameters": [
+ {
+ "name": "lineNumber",
+ "description": "Line number to set breakpoint at.",
+ "type": "integer"
+ },
+ {
+ "name": "url",
+ "description": "URL of the resources to set breakpoint on.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "urlRegex",
+ "description": "Regex pattern for the URLs of the resources to set breakpoints on. Either `url` or\n`urlRegex` must be specified.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "scriptHash",
+ "description": "Script hash of the resources to set breakpoint on.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "columnNumber",
+ "description": "Offset in the line to set breakpoint at.",
+ "optional": true,
+ "type": "integer"
+ },
+ {
+ "name": "condition",
+ "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the\nbreakpoint if this expression evaluates to true.",
+ "optional": true,
+ "type": "string"
+ }
+ ],
+ "returns": [
+ {
+ "name": "breakpointId",
+ "description": "Id of the created breakpoint for further reference.",
+ "$ref": "BreakpointId"
+ },
+ {
+ "name": "locations",
+ "description": "List of the locations this breakpoint resolved into upon addition.",
+ "type": "array",
+ "items": {
+ "$ref": "Location"
+ }
+ }
+ ]
+ },
+ {
+ "name": "setBreakpointsActive",
+ "description": "Activates / deactivates all breakpoints on the page.",
+ "parameters": [
+ {
+ "name": "active",
+ "description": "New value for breakpoints active state.",
+ "type": "boolean"
+ }
+ ]
+ },
+ {
+ "name": "setPauseOnExceptions",
+ "description": "Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions or\nno exceptions. Initial pause on exceptions state is `none`.",
+ "parameters": [
+ {
+ "name": "state",
+ "description": "Pause on exceptions mode.",
+ "type": "string",
+ "enum": [
+ "none",
+ "uncaught",
+ "all"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "setReturnValue",
+ "description": "Changes return value in top frame. Available only at return break position.",
+ "experimental": true,
+ "parameters": [
+ {
+ "name": "newValue",
+ "description": "New return value.",
+ "$ref": "Runtime.CallArgument"
+ }
+ ]
+ },
+ {
+ "name": "setScriptSource",
+ "description": "Edits JavaScript source live.",
+ "parameters": [
+ {
+ "name": "scriptId",
+ "description": "Id of the script to edit.",
+ "$ref": "Runtime.ScriptId"
+ },
+ {
+ "name": "scriptSource",
+ "description": "New content of the script.",
+ "type": "string"
+ },
+ {
+ "name": "dryRun",
+ "description": "If true the change will not actually be applied. Dry run may be used to get result\ndescription without actually modifying the code.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ],
+ "returns": [
+ {
+ "name": "callFrames",
+ "description": "New stack trace in case editing has happened while VM was stopped.",
+ "optional": true,
+ "type": "array",
+ "items": {
+ "$ref": "CallFrame"
+ }
+ },
+ {
+ "name": "stackChanged",
+ "description": "Whether current call stack was modified after applying the changes.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "asyncStackTrace",
+ "description": "Async stack trace, if any.",
+ "optional": true,
+ "$ref": "Runtime.StackTrace"
+ },
+ {
+ "name": "asyncStackTraceId",
+ "description": "Async stack trace, if any.",
+ "experimental": true,
+ "optional": true,
+ "$ref": "Runtime.StackTraceId"
+ },
+ {
+ "name": "exceptionDetails",
+ "description": "Exception details if any.",
+ "optional": true,
+ "$ref": "Runtime.ExceptionDetails"
+ }
+ ]
+ },
+ {
+ "name": "setSkipAllPauses",
+ "description": "Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc).",
+ "parameters": [
+ {
+ "name": "skip",
+ "description": "New value for skip pauses state.",
+ "type": "boolean"
+ }
+ ]
+ },
+ {
+ "name": "setVariableValue",
+ "description": "Changes value of variable in a callframe. Object-based scopes are not supported and must be\nmutated manually.",
+ "parameters": [
+ {
+ "name": "scopeNumber",
+ "description": "0-based number of scope as was listed in scope chain. Only 'local', 'closure' and 'catch'\nscope types are allowed. Other scopes could be manipulated manually.",
+ "type": "integer"
+ },
+ {
+ "name": "variableName",
+ "description": "Variable name.",
+ "type": "string"
+ },
+ {
+ "name": "newValue",
+ "description": "New variable value.",
+ "$ref": "Runtime.CallArgument"
+ },
+ {
+ "name": "callFrameId",
+ "description": "Id of callframe that holds variable.",
+ "$ref": "CallFrameId"
+ }
+ ]
+ },
+ {
+ "name": "stepInto",
+ "description": "Steps into the function call.",
+ "parameters": [
+ {
+ "name": "breakOnAsyncCall",
+ "description": "Debugger will issue additional Debugger.paused notification if any async task is scheduled\nbefore next pause.",
+ "experimental": true,
+ "optional": true,
+ "type": "boolean"
+ }
+ ]
+ },
+ {
+ "name": "stepOut",
+ "description": "Steps out of the function call."
+ },
+ {
+ "name": "stepOver",
+ "description": "Steps over the statement."
+ }
+ ],
+ "events": [
+ {
+ "name": "breakpointResolved",
+ "description": "Fired when breakpoint is resolved to an actual script and location.",
+ "parameters": [
+ {
+ "name": "breakpointId",
+ "description": "Breakpoint unique identifier.",
+ "$ref": "BreakpointId"
+ },
+ {
+ "name": "location",
+ "description": "Actual breakpoint location.",
+ "$ref": "Location"
+ }
+ ]
+ },
+ {
+ "name": "paused",
+ "description": "Fired when the virtual machine stopped on breakpoint or exception or any other stop criteria.",
+ "parameters": [
+ {
+ "name": "callFrames",
+ "description": "Call stack the virtual machine stopped on.",
+ "type": "array",
+ "items": {
+ "$ref": "CallFrame"
+ }
+ },
+ {
+ "name": "reason",
+ "description": "Pause reason.",
+ "type": "string",
+ "enum": [
+ "XHR",
+ "DOM",
+ "EventListener",
+ "exception",
+ "assert",
+ "debugCommand",
+ "promiseRejection",
+ "OOM",
+ "other",
+ "ambiguous"
+ ]
+ },
+ {
+ "name": "data",
+ "description": "Object containing break-specific auxiliary properties.",
+ "optional": true,
+ "type": "object"
+ },
+ {
+ "name": "hitBreakpoints",
+ "description": "Hit breakpoints IDs",
+ "optional": true,
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "asyncStackTrace",
+ "description": "Async stack trace, if any.",
+ "optional": true,
+ "$ref": "Runtime.StackTrace"
+ },
+ {
+ "name": "asyncStackTraceId",
+ "description": "Async stack trace, if any.",
+ "experimental": true,
+ "optional": true,
+ "$ref": "Runtime.StackTraceId"
+ },
+ {
+ "name": "asyncCallStackTraceId",
+ "description": "Just scheduled async call will have this stack trace as parent stack during async execution.\nThis field is available only after `Debugger.stepInto` call with `breakOnAsynCall` flag.",
+ "experimental": true,
+ "optional": true,
+ "$ref": "Runtime.StackTraceId"
+ }
+ ]
+ },
+ {
+ "name": "resumed",
+ "description": "Fired when the virtual machine resumed execution."
+ },
+ {
+ "name": "scriptFailedToParse",
+ "description": "Fired when virtual machine fails to parse the script.",
+ "parameters": [
+ {
+ "name": "scriptId",
+ "description": "Identifier of the script parsed.",
+ "$ref": "Runtime.ScriptId"
+ },
+ {
+ "name": "url",
+ "description": "URL or name of the script parsed (if any).",
+ "type": "string"
+ },
+ {
+ "name": "startLine",
+ "description": "Line offset of the script within the resource with given URL (for script tags).",
+ "type": "integer"
+ },
+ {
+ "name": "startColumn",
+ "description": "Column offset of the script within the resource with given URL.",
+ "type": "integer"
+ },
+ {
+ "name": "endLine",
+ "description": "Last line of the script.",
+ "type": "integer"
+ },
+ {
+ "name": "endColumn",
+ "description": "Length of the last line of the script.",
+ "type": "integer"
+ },
+ {
+ "name": "executionContextId",
+ "description": "Specifies script creation context.",
+ "$ref": "Runtime.ExecutionContextId"
+ },
+ {
+ "name": "hash",
+ "description": "Content hash of the script.",
+ "type": "string"
+ },
+ {
+ "name": "executionContextAuxData",
+ "description": "Embedder-specific auxiliary data.",
+ "optional": true,
+ "type": "object"
+ },
+ {
+ "name": "sourceMapURL",
+ "description": "URL of source map associated with script (if any).",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "hasSourceURL",
+ "description": "True, if this script has sourceURL.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "isModule",
+ "description": "True, if this script is ES6 module.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "length",
+ "description": "This script length.",
+ "optional": true,
+ "type": "integer"
+ },
+ {
+ "name": "stackTrace",
+ "description": "JavaScript top stack frame of where the script parsed event was triggered if available.",
+ "experimental": true,
+ "optional": true,
+ "$ref": "Runtime.StackTrace"
+ }
+ ]
+ },
+ {
+ "name": "scriptParsed",
+ "description": "Fired when virtual machine parses script. This event is also fired for all known and uncollected\nscripts upon enabling debugger.",
+ "parameters": [
+ {
+ "name": "scriptId",
+ "description": "Identifier of the script parsed.",
+ "$ref": "Runtime.ScriptId"
+ },
+ {
+ "name": "url",
+ "description": "URL or name of the script parsed (if any).",
+ "type": "string"
+ },
+ {
+ "name": "startLine",
+ "description": "Line offset of the script within the resource with given URL (for script tags).",
+ "type": "integer"
+ },
+ {
+ "name": "startColumn",
+ "description": "Column offset of the script within the resource with given URL.",
+ "type": "integer"
+ },
+ {
+ "name": "endLine",
+ "description": "Last line of the script.",
+ "type": "integer"
+ },
+ {
+ "name": "endColumn",
+ "description": "Length of the last line of the script.",
+ "type": "integer"
+ },
+ {
+ "name": "executionContextId",
+ "description": "Specifies script creation context.",
+ "$ref": "Runtime.ExecutionContextId"
+ },
+ {
+ "name": "hash",
+ "description": "Content hash of the script.",
+ "type": "string"
+ },
+ {
+ "name": "executionContextAuxData",
+ "description": "Embedder-specific auxiliary data.",
+ "optional": true,
+ "type": "object"
+ },
+ {
+ "name": "isLiveEdit",
+ "description": "True, if this script is generated as a result of the live edit operation.",
+ "experimental": true,
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "sourceMapURL",
+ "description": "URL of source map associated with script (if any).",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "hasSourceURL",
+ "description": "True, if this script has sourceURL.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "isModule",
+ "description": "True, if this script is ES6 module.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "length",
+ "description": "This script length.",
+ "optional": true,
+ "type": "integer"
+ },
+ {
+ "name": "stackTrace",
+ "description": "JavaScript top stack frame of where the script parsed event was triggered if available.",
+ "experimental": true,
+ "optional": true,
+ "$ref": "Runtime.StackTrace"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "domain": "HeapProfiler",
+ "experimental": true,
+ "dependencies": [
+ "Runtime"
+ ],
+ "types": [
+ {
+ "id": "HeapSnapshotObjectId",
+ "description": "Heap snapshot object id.",
+ "type": "string"
+ },
+ {
+ "id": "SamplingHeapProfileNode",
+ "description": "Sampling Heap Profile node. Holds callsite information, allocation statistics and child nodes.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "callFrame",
+ "description": "Function location.",
+ "$ref": "Runtime.CallFrame"
+ },
+ {
+ "name": "selfSize",
+ "description": "Allocations size in bytes for the node excluding children.",
+ "type": "number"
+ },
+ {
+ "name": "children",
+ "description": "Child nodes.",
+ "type": "array",
+ "items": {
+ "$ref": "SamplingHeapProfileNode"
+ }
+ }
+ ]
+ },
+ {
+ "id": "SamplingHeapProfile",
+ "description": "Profile.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "head",
+ "$ref": "SamplingHeapProfileNode"
+ }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "addInspectedHeapObject",
+ "description": "Enables console to refer to the node with given id via $x (see Command Line API for more details\n$x functions).",
+ "parameters": [
+ {
+ "name": "heapObjectId",
+ "description": "Heap snapshot object id to be accessible by means of $x command line API.",
+ "$ref": "HeapSnapshotObjectId"
+ }
+ ]
+ },
+ {
+ "name": "collectGarbage"
+ },
+ {
+ "name": "disable"
+ },
+ {
+ "name": "enable"
+ },
+ {
+ "name": "getHeapObjectId",
+ "parameters": [
+ {
+ "name": "objectId",
+ "description": "Identifier of the object to get heap object id for.",
+ "$ref": "Runtime.RemoteObjectId"
+ }
+ ],
+ "returns": [
+ {
+ "name": "heapSnapshotObjectId",
+ "description": "Id of the heap snapshot object corresponding to the passed remote object id.",
+ "$ref": "HeapSnapshotObjectId"
+ }
+ ]
+ },
+ {
+ "name": "getObjectByHeapObjectId",
+ "parameters": [
+ {
+ "name": "objectId",
+ "$ref": "HeapSnapshotObjectId"
+ },
+ {
+ "name": "objectGroup",
+ "description": "Symbolic group name that can be used to release multiple objects.",
+ "optional": true,
+ "type": "string"
+ }
+ ],
+ "returns": [
+ {
+ "name": "result",
+ "description": "Evaluation result.",
+ "$ref": "Runtime.RemoteObject"
+ }
+ ]
+ },
+ {
+ "name": "getSamplingProfile",
+ "returns": [
+ {
+ "name": "profile",
+ "description": "Return the sampling profile being collected.",
+ "$ref": "SamplingHeapProfile"
+ }
+ ]
+ },
+ {
+ "name": "startSampling",
+ "parameters": [
+ {
+ "name": "samplingInterval",
+ "description": "Average sample interval in bytes. Poisson distribution is used for the intervals. The\ndefault value is 32768 bytes.",
+ "optional": true,
+ "type": "number"
+ }
+ ]
+ },
+ {
+ "name": "startTrackingHeapObjects",
+ "parameters": [
+ {
+ "name": "trackAllocations",
+ "optional": true,
+ "type": "boolean"
+ }
+ ]
+ },
+ {
+ "name": "stopSampling",
+ "returns": [
+ {
+ "name": "profile",
+ "description": "Recorded sampling heap profile.",
+ "$ref": "SamplingHeapProfile"
+ }
+ ]
+ },
+ {
+ "name": "stopTrackingHeapObjects",
+ "parameters": [
+ {
+ "name": "reportProgress",
+ "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken\nwhen the tracking is stopped.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ]
+ },
+ {
+ "name": "takeHeapSnapshot",
+ "parameters": [
+ {
+ "name": "reportProgress",
+ "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ]
+ }
+ ],
+ "events": [
+ {
+ "name": "addHeapSnapshotChunk",
+ "parameters": [
+ {
+ "name": "chunk",
+ "type": "string"
+ }
+ ]
+ },
+ {
+ "name": "heapStatsUpdate",
+ "description": "If heap objects tracking has been started then backend may send update for one or more fragments",
+ "parameters": [
+ {
+ "name": "statsUpdate",
+ "description": "An array of triplets. Each triplet describes a fragment. The first integer is the fragment\nindex, the second integer is a total count of objects for the fragment, the third integer is\na total size of the objects for the fragment.",
+ "type": "array",
+ "items": {
+ "type": "integer"
+ }
+ }
+ ]
+ },
+ {
+ "name": "lastSeenObjectId",
+ "description": "If heap objects tracking has been started then backend regularly sends a current value for last\nseen object id and corresponding timestamp. If the were changes in the heap since last event\nthen one or more heapStatsUpdate events will be sent before a new lastSeenObjectId event.",
+ "parameters": [
+ {
+ "name": "lastSeenObjectId",
+ "type": "integer"
+ },
+ {
+ "name": "timestamp",
+ "type": "number"
+ }
+ ]
+ },
+ {
+ "name": "reportHeapSnapshotProgress",
+ "parameters": [
+ {
+ "name": "done",
+ "type": "integer"
+ },
+ {
+ "name": "total",
+ "type": "integer"
+ },
+ {
+ "name": "finished",
+ "optional": true,
+ "type": "boolean"
+ }
+ ]
+ },
+ {
+ "name": "resetProfiles"
+ }
+ ]
+ },
+ {
+ "domain": "Profiler",
+ "dependencies": [
+ "Runtime",
+ "Debugger"
+ ],
+ "types": [
+ {
+ "id": "ProfileNode",
+ "description": "Profile node. Holds callsite information, execution statistics and child nodes.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "id",
+ "description": "Unique id of the node.",
+ "type": "integer"
+ },
+ {
+ "name": "callFrame",
+ "description": "Function location.",
+ "$ref": "Runtime.CallFrame"
+ },
+ {
+ "name": "hitCount",
+ "description": "Number of samples where this node was on top of the call stack.",
+ "optional": true,
+ "type": "integer"
+ },
+ {
+ "name": "children",
+ "description": "Child node ids.",
+ "optional": true,
+ "type": "array",
+ "items": {
+ "type": "integer"
+ }
+ },
+ {
+ "name": "deoptReason",
+ "description": "The reason of being not optimized. The function may be deoptimized or marked as don't\noptimize.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "positionTicks",
+ "description": "An array of source position ticks.",
+ "optional": true,
+ "type": "array",
+ "items": {
+ "$ref": "PositionTickInfo"
+ }
+ }
+ ]
+ },
+ {
+ "id": "Profile",
+ "description": "Profile.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "nodes",
+ "description": "The list of profile nodes. First item is the root node.",
+ "type": "array",
+ "items": {
+ "$ref": "ProfileNode"
+ }
+ },
+ {
+ "name": "startTime",
+ "description": "Profiling start timestamp in microseconds.",
+ "type": "number"
+ },
+ {
+ "name": "endTime",
+ "description": "Profiling end timestamp in microseconds.",
+ "type": "number"
+ },
+ {
+ "name": "samples",
+ "description": "Ids of samples top nodes.",
+ "optional": true,
+ "type": "array",
+ "items": {
+ "type": "integer"
+ }
+ },
+ {
+ "name": "timeDeltas",
+ "description": "Time intervals between adjacent samples in microseconds. The first delta is relative to the\nprofile startTime.",
+ "optional": true,
+ "type": "array",
+ "items": {
+ "type": "integer"
+ }
+ }
+ ]
+ },
+ {
+ "id": "PositionTickInfo",
+ "description": "Specifies a number of samples attributed to a certain source position.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "line",
+ "description": "Source line number (1-based).",
+ "type": "integer"
+ },
+ {
+ "name": "ticks",
+ "description": "Number of samples attributed to the source line.",
+ "type": "integer"
+ }
+ ]
+ },
+ {
+ "id": "CoverageRange",
+ "description": "Coverage data for a source range.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "startOffset",
+ "description": "JavaScript script source offset for the range start.",
+ "type": "integer"
+ },
+ {
+ "name": "endOffset",
+ "description": "JavaScript script source offset for the range end.",
+ "type": "integer"
+ },
+ {
+ "name": "count",
+ "description": "Collected execution count of the source range.",
+ "type": "integer"
+ }
+ ]
+ },
+ {
+ "id": "FunctionCoverage",
+ "description": "Coverage data for a JavaScript function.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "functionName",
+ "description": "JavaScript function name.",
+ "type": "string"
+ },
+ {
+ "name": "ranges",
+ "description": "Source ranges inside the function with coverage data.",
+ "type": "array",
+ "items": {
+ "$ref": "CoverageRange"
+ }
+ },
+ {
+ "name": "isBlockCoverage",
+ "description": "Whether coverage data for this function has block granularity.",
+ "type": "boolean"
+ }
+ ]
+ },
+ {
+ "id": "ScriptCoverage",
+ "description": "Coverage data for a JavaScript script.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "scriptId",
+ "description": "JavaScript script id.",
+ "$ref": "Runtime.ScriptId"
+ },
+ {
+ "name": "url",
+ "description": "JavaScript script name or url.",
+ "type": "string"
+ },
+ {
+ "name": "functions",
+ "description": "Functions contained in the script that has coverage data.",
+ "type": "array",
+ "items": {
+ "$ref": "FunctionCoverage"
+ }
+ }
+ ]
+ },
+ {
+ "id": "TypeObject",
+ "description": "Describes a type collected during runtime.",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ {
+ "name": "name",
+ "description": "Name of a type collected with type profiling.",
+ "type": "string"
+ }
+ ]
+ },
+ {
+ "id": "TypeProfileEntry",
+ "description": "Source offset and types for a parameter or return value.",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ {
+ "name": "offset",
+ "description": "Source offset of the parameter or end of function for return values.",
+ "type": "integer"
+ },
+ {
+ "name": "types",
+ "description": "The types for this parameter or return value.",
+ "type": "array",
+ "items": {
+ "$ref": "TypeObject"
+ }
+ }
+ ]
+ },
+ {
+ "id": "ScriptTypeProfile",
+ "description": "Type profile data collected during runtime for a JavaScript script.",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ {
+ "name": "scriptId",
+ "description": "JavaScript script id.",
+ "$ref": "Runtime.ScriptId"
+ },
+ {
+ "name": "url",
+ "description": "JavaScript script name or url.",
+ "type": "string"
+ },
+ {
+ "name": "entries",
+ "description": "Type profile entries for parameters and return values of the functions in the script.",
+ "type": "array",
+ "items": {
+ "$ref": "TypeProfileEntry"
+ }
+ }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "disable"
+ },
+ {
+ "name": "enable"
+ },
+ {
+ "name": "getBestEffortCoverage",
+ "description": "Collect coverage data for the current isolate. The coverage data may be incomplete due to\ngarbage collection.",
+ "returns": [
+ {
+ "name": "result",
+ "description": "Coverage data for the current isolate.",
+ "type": "array",
+ "items": {
+ "$ref": "ScriptCoverage"
+ }
+ }
+ ]
+ },
+ {
+ "name": "setSamplingInterval",
+ "description": "Changes CPU profiler sampling interval. Must be called before CPU profiles recording started.",
+ "parameters": [
+ {
+ "name": "interval",
+ "description": "New sampling interval in microseconds.",
+ "type": "integer"
+ }
+ ]
+ },
+ {
+ "name": "start"
+ },
+ {
+ "name": "startPreciseCoverage",
+ "description": "Enable precise code coverage. Coverage data for JavaScript executed before enabling precise code\ncoverage may be incomplete. Enabling prevents running optimized code and resets execution\ncounters.",
+ "parameters": [
+ {
+ "name": "callCount",
+ "description": "Collect accurate call counts beyond simple 'covered' or 'not covered'.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "detailed",
+ "description": "Collect block-based coverage.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ]
+ },
+ {
+ "name": "startTypeProfile",
+ "description": "Enable type profile.",
+ "experimental": true
+ },
+ {
+ "name": "stop",
+ "returns": [
+ {
+ "name": "profile",
+ "description": "Recorded profile.",
+ "$ref": "Profile"
+ }
+ ]
+ },
+ {
+ "name": "stopPreciseCoverage",
+ "description": "Disable precise code coverage. Disabling releases unnecessary execution count records and allows\nexecuting optimized code."
+ },
+ {
+ "name": "stopTypeProfile",
+ "description": "Disable type profile. Disabling releases type profile data collected so far.",
+ "experimental": true
+ },
+ {
+ "name": "takePreciseCoverage",
+ "description": "Collect coverage data for the current isolate, and resets execution counters. Precise code\ncoverage needs to have started.",
+ "returns": [
+ {
+ "name": "result",
+ "description": "Coverage data for the current isolate.",
+ "type": "array",
+ "items": {
+ "$ref": "ScriptCoverage"
+ }
+ }
+ ]
+ },
+ {
+ "name": "takeTypeProfile",
+ "description": "Collect type profile.",
+ "experimental": true,
+ "returns": [
+ {
+ "name": "result",
+ "description": "Type profile for all scripts since startTypeProfile() was turned on.",
+ "type": "array",
+ "items": {
+ "$ref": "ScriptTypeProfile"
+ }
+ }
+ ]
+ }
+ ],
+ "events": [
+ {
+ "name": "consoleProfileFinished",
+ "parameters": [
+ {
+ "name": "id",
+ "type": "string"
+ },
+ {
+ "name": "location",
+ "description": "Location of console.profileEnd().",
+ "$ref": "Debugger.Location"
+ },
+ {
+ "name": "profile",
+ "$ref": "Profile"
+ },
+ {
+ "name": "title",
+ "description": "Profile title passed as an argument to console.profile().",
+ "optional": true,
+ "type": "string"
+ }
+ ]
+ },
+ {
+ "name": "consoleProfileStarted",
+ "description": "Sent when new profile recording is started using console.profile() call.",
+ "parameters": [
+ {
+ "name": "id",
+ "type": "string"
+ },
+ {
+ "name": "location",
+ "description": "Location of console.profile().",
+ "$ref": "Debugger.Location"
+ },
+ {
+ "name": "title",
+ "description": "Profile title passed as an argument to console.profile().",
+ "optional": true,
+ "type": "string"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "domain": "Runtime",
+ "description": "Runtime domain exposes JavaScript runtime by means of remote evaluation and mirror objects.\nEvaluation results are returned as mirror object that expose object type, string representation\nand unique identifier that can be used for further object reference. Original objects are\nmaintained in memory unless they are either explicitly released or are released along with the\nother objects in their object group.",
+ "types": [
+ {
+ "id": "ScriptId",
+ "description": "Unique script identifier.",
+ "type": "string"
+ },
+ {
+ "id": "RemoteObjectId",
+ "description": "Unique object identifier.",
+ "type": "string"
+ },
+ {
+ "id": "UnserializableValue",
+ "description": "Primitive value which cannot be JSON-stringified.",
+ "type": "string",
+ "enum": [
+ "Infinity",
+ "NaN",
+ "-Infinity",
+ "-0"
+ ]
+ },
+ {
+ "id": "RemoteObject",
+ "description": "Mirror object referencing original JavaScript object.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "type",
+ "description": "Object type.",
+ "type": "string",
+ "enum": [
+ "object",
+ "function",
+ "undefined",
+ "string",
+ "number",
+ "boolean",
+ "symbol"
+ ]
+ },
+ {
+ "name": "subtype",
+ "description": "Object subtype hint. Specified for `object` type values only.",
+ "optional": true,
+ "type": "string",
+ "enum": [
+ "array",
+ "null",
+ "node",
+ "regexp",
+ "date",
+ "map",
+ "set",
+ "weakmap",
+ "weakset",
+ "iterator",
+ "generator",
+ "error",
+ "proxy",
+ "promise",
+ "typedarray"
+ ]
+ },
+ {
+ "name": "className",
+ "description": "Object class (constructor) name. Specified for `object` type values only.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "value",
+ "description": "Remote object value in case of primitive values or JSON values (if it was requested).",
+ "optional": true,
+ "type": "any"
+ },
+ {
+ "name": "unserializableValue",
+ "description": "Primitive value which can not be JSON-stringified does not have `value`, but gets this\nproperty.",
+ "optional": true,
+ "$ref": "UnserializableValue"
+ },
+ {
+ "name": "description",
+ "description": "String representation of the object.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "objectId",
+ "description": "Unique object identifier (for non-primitive values).",
+ "optional": true,
+ "$ref": "RemoteObjectId"
+ },
+ {
+ "name": "preview",
+ "description": "Preview containing abbreviated property values. Specified for `object` type values only.",
+ "experimental": true,
+ "optional": true,
+ "$ref": "ObjectPreview"
+ },
+ {
+ "name": "customPreview",
+ "experimental": true,
+ "optional": true,
+ "$ref": "CustomPreview"
+ }
+ ]
+ },
+ {
+ "id": "CustomPreview",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ {
+ "name": "header",
+ "type": "string"
+ },
+ {
+ "name": "hasBody",
+ "type": "boolean"
+ },
+ {
+ "name": "formatterObjectId",
+ "$ref": "RemoteObjectId"
+ },
+ {
+ "name": "bindRemoteObjectFunctionId",
+ "$ref": "RemoteObjectId"
+ },
+ {
+ "name": "configObjectId",
+ "optional": true,
+ "$ref": "RemoteObjectId"
+ }
+ ]
+ },
+ {
+ "id": "ObjectPreview",
+ "description": "Object containing abbreviated remote object value.",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ {
+ "name": "type",
+ "description": "Object type.",
+ "type": "string",
+ "enum": [
+ "object",
+ "function",
+ "undefined",
+ "string",
+ "number",
+ "boolean",
+ "symbol"
+ ]
+ },
+ {
+ "name": "subtype",
+ "description": "Object subtype hint. Specified for `object` type values only.",
+ "optional": true,
+ "type": "string",
+ "enum": [
+ "array",
+ "null",
+ "node",
+ "regexp",
+ "date",
+ "map",
+ "set",
+ "weakmap",
+ "weakset",
+ "iterator",
+ "generator",
+ "error"
+ ]
+ },
+ {
+ "name": "description",
+ "description": "String representation of the object.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "overflow",
+ "description": "True iff some of the properties or entries of the original object did not fit.",
+ "type": "boolean"
+ },
+ {
+ "name": "properties",
+ "description": "List of the properties.",
+ "type": "array",
+ "items": {
+ "$ref": "PropertyPreview"
+ }
+ },
+ {
+ "name": "entries",
+ "description": "List of the entries. Specified for `map` and `set` subtype values only.",
+ "optional": true,
+ "type": "array",
+ "items": {
+ "$ref": "EntryPreview"
+ }
+ }
+ ]
+ },
+ {
+ "id": "PropertyPreview",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ {
+ "name": "name",
+ "description": "Property name.",
+ "type": "string"
+ },
+ {
+ "name": "type",
+ "description": "Object type. Accessor means that the property itself is an accessor property.",
+ "type": "string",
+ "enum": [
+ "object",
+ "function",
+ "undefined",
+ "string",
+ "number",
+ "boolean",
+ "symbol",
+ "accessor"
+ ]
+ },
+ {
+ "name": "value",
+ "description": "User-friendly property value string.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "valuePreview",
+ "description": "Nested value preview.",
+ "optional": true,
+ "$ref": "ObjectPreview"
+ },
+ {
+ "name": "subtype",
+ "description": "Object subtype hint. Specified for `object` type values only.",
+ "optional": true,
+ "type": "string",
+ "enum": [
+ "array",
+ "null",
+ "node",
+ "regexp",
+ "date",
+ "map",
+ "set",
+ "weakmap",
+ "weakset",
+ "iterator",
+ "generator",
+ "error"
+ ]
+ }
+ ]
+ },
+ {
+ "id": "EntryPreview",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ {
+ "name": "key",
+ "description": "Preview of the key. Specified for map-like collection entries.",
+ "optional": true,
+ "$ref": "ObjectPreview"
+ },
+ {
+ "name": "value",
+ "description": "Preview of the value.",
+ "$ref": "ObjectPreview"
+ }
+ ]
+ },
+ {
+ "id": "PropertyDescriptor",
+ "description": "Object property descriptor.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "name",
+ "description": "Property name or symbol description.",
+ "type": "string"
+ },
+ {
+ "name": "value",
+ "description": "The value associated with the property.",
+ "optional": true,
+ "$ref": "RemoteObject"
+ },
+ {
+ "name": "writable",
+ "description": "True if the value associated with the property may be changed (data descriptors only).",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "get",
+ "description": "A function which serves as a getter for the property, or `undefined` if there is no getter\n(accessor descriptors only).",
+ "optional": true,
+ "$ref": "RemoteObject"
+ },
+ {
+ "name": "set",
+ "description": "A function which serves as a setter for the property, or `undefined` if there is no setter\n(accessor descriptors only).",
+ "optional": true,
+ "$ref": "RemoteObject"
+ },
+ {
+ "name": "configurable",
+ "description": "True if the type of this property descriptor may be changed and if the property may be\ndeleted from the corresponding object.",
+ "type": "boolean"
+ },
+ {
+ "name": "enumerable",
+ "description": "True if this property shows up during enumeration of the properties on the corresponding\nobject.",
+ "type": "boolean"
+ },
+ {
+ "name": "wasThrown",
+ "description": "True if the result was thrown during the evaluation.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "isOwn",
+ "description": "True if the property is owned for the object.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "symbol",
+ "description": "Property symbol object, if the property is of the `symbol` type.",
+ "optional": true,
+ "$ref": "RemoteObject"
+ }
+ ]
+ },
+ {
+ "id": "InternalPropertyDescriptor",
+ "description": "Object internal property descriptor. This property isn't normally visible in JavaScript code.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "name",
+ "description": "Conventional property name.",
+ "type": "string"
+ },
+ {
+ "name": "value",
+ "description": "The value associated with the property.",
+ "optional": true,
+ "$ref": "RemoteObject"
+ }
+ ]
+ },
+ {
+ "id": "CallArgument",
+ "description": "Represents function call argument. Either remote object id `objectId`, primitive `value`,\nunserializable primitive value or neither of (for undefined) them should be specified.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "value",
+ "description": "Primitive value or serializable javascript object.",
+ "optional": true,
+ "type": "any"
+ },
+ {
+ "name": "unserializableValue",
+ "description": "Primitive value which can not be JSON-stringified.",
+ "optional": true,
+ "$ref": "UnserializableValue"
+ },
+ {
+ "name": "objectId",
+ "description": "Remote object handle.",
+ "optional": true,
+ "$ref": "RemoteObjectId"
+ }
+ ]
+ },
+ {
+ "id": "ExecutionContextId",
+ "description": "Id of an execution context.",
+ "type": "integer"
+ },
+ {
+ "id": "ExecutionContextDescription",
+ "description": "Description of an isolated world.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "id",
+ "description": "Unique id of the execution context. It can be used to specify in which execution context\nscript evaluation should be performed.",
+ "$ref": "ExecutionContextId"
+ },
+ {
+ "name": "origin",
+ "description": "Execution context origin.",
+ "type": "string"
+ },
+ {
+ "name": "name",
+ "description": "Human readable name describing given context.",
+ "type": "string"
+ },
+ {
+ "name": "auxData",
+ "description": "Embedder-specific auxiliary data.",
+ "optional": true,
+ "type": "object"
+ }
+ ]
+ },
+ {
+ "id": "ExceptionDetails",
+ "description": "Detailed information about exception (or error) that was thrown during script compilation or\nexecution.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "exceptionId",
+ "description": "Exception id.",
+ "type": "integer"
+ },
+ {
+ "name": "text",
+ "description": "Exception text, which should be used together with exception object when available.",
+ "type": "string"
+ },
+ {
+ "name": "lineNumber",
+ "description": "Line number of the exception location (0-based).",
+ "type": "integer"
+ },
+ {
+ "name": "columnNumber",
+ "description": "Column number of the exception location (0-based).",
+ "type": "integer"
+ },
+ {
+ "name": "scriptId",
+ "description": "Script ID of the exception location.",
+ "optional": true,
+ "$ref": "ScriptId"
+ },
+ {
+ "name": "url",
+ "description": "URL of the exception location, to be used when the script was not reported.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "stackTrace",
+ "description": "JavaScript stack trace if available.",
+ "optional": true,
+ "$ref": "StackTrace"
+ },
+ {
+ "name": "exception",
+ "description": "Exception object if available.",
+ "optional": true,
+ "$ref": "RemoteObject"
+ },
+ {
+ "name": "executionContextId",
+ "description": "Identifier of the context where exception happened.",
+ "optional": true,
+ "$ref": "ExecutionContextId"
+ }
+ ]
+ },
+ {
+ "id": "Timestamp",
+ "description": "Number of milliseconds since epoch.",
+ "type": "number"
+ },
+ {
+ "id": "CallFrame",
+ "description": "Stack entry for runtime errors and assertions.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "functionName",
+ "description": "JavaScript function name.",
+ "type": "string"
+ },
+ {
+ "name": "scriptId",
+ "description": "JavaScript script id.",
+ "$ref": "ScriptId"
+ },
+ {
+ "name": "url",
+ "description": "JavaScript script name or url.",
+ "type": "string"
+ },
+ {
+ "name": "lineNumber",
+ "description": "JavaScript script line number (0-based).",
+ "type": "integer"
+ },
+ {
+ "name": "columnNumber",
+ "description": "JavaScript script column number (0-based).",
+ "type": "integer"
+ }
+ ]
+ },
+ {
+ "id": "StackTrace",
+ "description": "Call frames for assertions or error messages.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "description",
+ "description": "String label of this stack trace. For async traces this may be a name of the function that\ninitiated the async call.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "callFrames",
+ "description": "JavaScript function name.",
+ "type": "array",
+ "items": {
+ "$ref": "CallFrame"
+ }
+ },
+ {
+ "name": "parent",
+ "description": "Asynchronous JavaScript stack trace that preceded this stack, if available.",
+ "optional": true,
+ "$ref": "StackTrace"
+ },
+ {
+ "name": "parentId",
+ "description": "Asynchronous JavaScript stack trace that preceded this stack, if available.",
+ "experimental": true,
+ "optional": true,
+ "$ref": "StackTraceId"
+ }
+ ]
+ },
+ {
+ "id": "UniqueDebuggerId",
+ "description": "Unique identifier of current debugger.",
+ "experimental": true,
+ "type": "string"
+ },
+ {
+ "id": "StackTraceId",
+ "description": "If `debuggerId` is set stack trace comes from another debugger and can be resolved there. This\nallows to track cross-debugger calls. See `Runtime.StackTrace` and `Debugger.paused` for usages.",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ {
+ "name": "id",
+ "type": "string"
+ },
+ {
+ "name": "debuggerId",
+ "optional": true,
+ "$ref": "UniqueDebuggerId"
+ }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "awaitPromise",
+ "description": "Add handler to promise with given promise object id.",
+ "parameters": [
+ {
+ "name": "promiseObjectId",
+ "description": "Identifier of the promise.",
+ "$ref": "RemoteObjectId"
+ },
+ {
+ "name": "returnByValue",
+ "description": "Whether the result is expected to be a JSON object that should be sent by value.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "generatePreview",
+ "description": "Whether preview should be generated for the result.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ],
+ "returns": [
+ {
+ "name": "result",
+ "description": "Promise result. Will contain rejected value if promise was rejected.",
+ "$ref": "RemoteObject"
+ },
+ {
+ "name": "exceptionDetails",
+ "description": "Exception details if stack strace is available.",
+ "optional": true,
+ "$ref": "ExceptionDetails"
+ }
+ ]
+ },
+ {
+ "name": "callFunctionOn",
+ "description": "Calls function with given declaration on the given object. Object group of the result is\ninherited from the target object.",
+ "parameters": [
+ {
+ "name": "functionDeclaration",
+ "description": "Declaration of the function to call.",
+ "type": "string"
+ },
+ {
+ "name": "objectId",
+ "description": "Identifier of the object to call function on. Either objectId or executionContextId should\nbe specified.",
+ "optional": true,
+ "$ref": "RemoteObjectId"
+ },
+ {
+ "name": "arguments",
+ "description": "Call arguments. All call arguments must belong to the same JavaScript world as the target\nobject.",
+ "optional": true,
+ "type": "array",
+ "items": {
+ "$ref": "CallArgument"
+ }
+ },
+ {
+ "name": "silent",
+ "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause\nexecution. Overrides `setPauseOnException` state.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "returnByValue",
+ "description": "Whether the result is expected to be a JSON object which should be sent by value.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "generatePreview",
+ "description": "Whether preview should be generated for the result.",
+ "experimental": true,
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "userGesture",
+ "description": "Whether execution should be treated as initiated by user in the UI.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "awaitPromise",
+ "description": "Whether execution should `await` for resulting value and return once awaited promise is\nresolved.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "executionContextId",
+ "description": "Specifies execution context which global object will be used to call function on. Either\nexecutionContextId or objectId should be specified.",
+ "optional": true,
+ "$ref": "ExecutionContextId"
+ },
+ {
+ "name": "objectGroup",
+ "description": "Symbolic group name that can be used to release multiple objects. If objectGroup is not\nspecified and objectId is, objectGroup will be inherited from object.",
+ "optional": true,
+ "type": "string"
+ }
+ ],
+ "returns": [
+ {
+ "name": "result",
+ "description": "Call result.",
+ "$ref": "RemoteObject"
+ },
+ {
+ "name": "exceptionDetails",
+ "description": "Exception details.",
+ "optional": true,
+ "$ref": "ExceptionDetails"
+ }
+ ]
+ },
+ {
+ "name": "compileScript",
+ "description": "Compiles expression.",
+ "parameters": [
+ {
+ "name": "expression",
+ "description": "Expression to compile.",
+ "type": "string"
+ },
+ {
+ "name": "sourceURL",
+ "description": "Source url to be set for the script.",
+ "type": "string"
+ },
+ {
+ "name": "persistScript",
+ "description": "Specifies whether the compiled script should be persisted.",
+ "type": "boolean"
+ },
+ {
+ "name": "executionContextId",
+ "description": "Specifies in which execution context to perform script run. If the parameter is omitted the\nevaluation will be performed in the context of the inspected page.",
+ "optional": true,
+ "$ref": "ExecutionContextId"
+ }
+ ],
+ "returns": [
+ {
+ "name": "scriptId",
+ "description": "Id of the script.",
+ "optional": true,
+ "$ref": "ScriptId"
+ },
+ {
+ "name": "exceptionDetails",
+ "description": "Exception details.",
+ "optional": true,
+ "$ref": "ExceptionDetails"
+ }
+ ]
+ },
+ {
+ "name": "disable",
+ "description": "Disables reporting of execution contexts creation."
+ },
+ {
+ "name": "discardConsoleEntries",
+ "description": "Discards collected exceptions and console API calls."
+ },
+ {
+ "name": "enable",
+ "description": "Enables reporting of execution contexts creation by means of `executionContextCreated` event.\nWhen the reporting gets enabled the event will be sent immediately for each existing execution\ncontext."
+ },
+ {
+ "name": "evaluate",
+ "description": "Evaluates expression on global object.",
+ "parameters": [
+ {
+ "name": "expression",
+ "description": "Expression to evaluate.",
+ "type": "string"
+ },
+ {
+ "name": "objectGroup",
+ "description": "Symbolic group name that can be used to release multiple objects.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "includeCommandLineAPI",
+ "description": "Determines whether Command Line API should be available during the evaluation.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "silent",
+ "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause\nexecution. Overrides `setPauseOnException` state.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "contextId",
+ "description": "Specifies in which execution context to perform evaluation. If the parameter is omitted the\nevaluation will be performed in the context of the inspected page.",
+ "optional": true,
+ "$ref": "ExecutionContextId"
+ },
+ {
+ "name": "returnByValue",
+ "description": "Whether the result is expected to be a JSON object that should be sent by value.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "generatePreview",
+ "description": "Whether preview should be generated for the result.",
+ "experimental": true,
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "userGesture",
+ "description": "Whether execution should be treated as initiated by user in the UI.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "awaitPromise",
+ "description": "Whether execution should `await` for resulting value and return once awaited promise is\nresolved.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ],
+ "returns": [
+ {
+ "name": "result",
+ "description": "Evaluation result.",
+ "$ref": "RemoteObject"
+ },
+ {
+ "name": "exceptionDetails",
+ "description": "Exception details.",
+ "optional": true,
+ "$ref": "ExceptionDetails"
+ }
+ ]
+ },
+ {
+ "name": "getProperties",
+ "description": "Returns properties of a given object. Object group of the result is inherited from the target\nobject.",
+ "parameters": [
+ {
+ "name": "objectId",
+ "description": "Identifier of the object to return properties for.",
+ "$ref": "RemoteObjectId"
+ },
+ {
+ "name": "ownProperties",
+ "description": "If true, returns properties belonging only to the element itself, not to its prototype\nchain.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "accessorPropertiesOnly",
+ "description": "If true, returns accessor properties (with getter/setter) only; internal properties are not\nreturned either.",
+ "experimental": true,
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "generatePreview",
+ "description": "Whether preview should be generated for the results.",
+ "experimental": true,
+ "optional": true,
+ "type": "boolean"
+ }
+ ],
+ "returns": [
+ {
+ "name": "result",
+ "description": "Object properties.",
+ "type": "array",
+ "items": {
+ "$ref": "PropertyDescriptor"
+ }
+ },
+ {
+ "name": "internalProperties",
+ "description": "Internal object properties (only of the element itself).",
+ "optional": true,
+ "type": "array",
+ "items": {
+ "$ref": "InternalPropertyDescriptor"
+ }
+ },
+ {
+ "name": "exceptionDetails",
+ "description": "Exception details.",
+ "optional": true,
+ "$ref": "ExceptionDetails"
+ }
+ ]
+ },
+ {
+ "name": "globalLexicalScopeNames",
+ "description": "Returns all let, const and class variables from global scope.",
+ "parameters": [
+ {
+ "name": "executionContextId",
+ "description": "Specifies in which execution context to lookup global scope variables.",
+ "optional": true,
+ "$ref": "ExecutionContextId"
+ }
+ ],
+ "returns": [
+ {
+ "name": "names",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ ]
+ },
+ {
+ "name": "queryObjects",
+ "parameters": [
+ {
+ "name": "prototypeObjectId",
+ "description": "Identifier of the prototype to return objects for.",
+ "$ref": "RemoteObjectId"
+ }
+ ],
+ "returns": [
+ {
+ "name": "objects",
+ "description": "Array with objects.",
+ "$ref": "RemoteObject"
+ }
+ ]
+ },
+ {
+ "name": "releaseObject",
+ "description": "Releases remote object with given id.",
+ "parameters": [
+ {
+ "name": "objectId",
+ "description": "Identifier of the object to release.",
+ "$ref": "RemoteObjectId"
+ }
+ ]
+ },
+ {
+ "name": "releaseObjectGroup",
+ "description": "Releases all remote objects that belong to a given group.",
+ "parameters": [
+ {
+ "name": "objectGroup",
+ "description": "Symbolic object group name.",
+ "type": "string"
+ }
+ ]
+ },
+ {
+ "name": "runIfWaitingForDebugger",
+ "description": "Tells inspected instance to run if it was waiting for debugger to attach."
+ },
+ {
+ "name": "runScript",
+ "description": "Runs script with given id in a given context.",
+ "parameters": [
+ {
+ "name": "scriptId",
+ "description": "Id of the script to run.",
+ "$ref": "ScriptId"
+ },
+ {
+ "name": "executionContextId",
+ "description": "Specifies in which execution context to perform script run. If the parameter is omitted the\nevaluation will be performed in the context of the inspected page.",
+ "optional": true,
+ "$ref": "ExecutionContextId"
+ },
+ {
+ "name": "objectGroup",
+ "description": "Symbolic group name that can be used to release multiple objects.",
+ "optional": true,
+ "type": "string"
+ },
+ {
+ "name": "silent",
+ "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause\nexecution. Overrides `setPauseOnException` state.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "includeCommandLineAPI",
+ "description": "Determines whether Command Line API should be available during the evaluation.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "returnByValue",
+ "description": "Whether the result is expected to be a JSON object which should be sent by value.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "generatePreview",
+ "description": "Whether preview should be generated for the result.",
+ "optional": true,
+ "type": "boolean"
+ },
+ {
+ "name": "awaitPromise",
+ "description": "Whether execution should `await` for resulting value and return once awaited promise is\nresolved.",
+ "optional": true,
+ "type": "boolean"
+ }
+ ],
+ "returns": [
+ {
+ "name": "result",
+ "description": "Run result.",
+ "$ref": "RemoteObject"
+ },
+ {
+ "name": "exceptionDetails",
+ "description": "Exception details.",
+ "optional": true,
+ "$ref": "ExceptionDetails"
+ }
+ ]
+ },
+ {
+ "name": "setCustomObjectFormatterEnabled",
+ "experimental": true,
+ "parameters": [
+ {
+ "name": "enabled",
+ "type": "boolean"
+ }
+ ]
+ }
+ ],
+ "events": [
+ {
+ "name": "consoleAPICalled",
+ "description": "Issued when console API was called.",
+ "parameters": [
+ {
+ "name": "type",
+ "description": "Type of the call.",
+ "type": "string",
+ "enum": [
+ "log",
+ "debug",
+ "info",
+ "error",
+ "warning",
+ "dir",
+ "dirxml",
+ "table",
+ "trace",
+ "clear",
+ "startGroup",
+ "startGroupCollapsed",
+ "endGroup",
+ "assert",
+ "profile",
+ "profileEnd",
+ "count",
+ "timeEnd"
+ ]
+ },
+ {
+ "name": "args",
+ "description": "Call arguments.",
+ "type": "array",
+ "items": {
+ "$ref": "RemoteObject"
+ }
+ },
+ {
+ "name": "executionContextId",
+ "description": "Identifier of the context where the call was made.",
+ "$ref": "ExecutionContextId"
+ },
+ {
+ "name": "timestamp",
+ "description": "Call timestamp.",
+ "$ref": "Timestamp"
+ },
+ {
+ "name": "stackTrace",
+ "description": "Stack trace captured when the call was made.",
+ "optional": true,
+ "$ref": "StackTrace"
+ },
+ {
+ "name": "context",
+ "description": "Console context descriptor for calls on non-default console context (not console.*):\n'anonymous#unique-logger-id' for call on unnamed context, 'name#unique-logger-id' for call\non named context.",
+ "experimental": true,
+ "optional": true,
+ "type": "string"
+ }
+ ]
+ },
+ {
+ "name": "exceptionRevoked",
+ "description": "Issued when unhandled exception was revoked.",
+ "parameters": [
+ {
+ "name": "reason",
+ "description": "Reason describing why exception was revoked.",
+ "type": "string"
+ },
+ {
+ "name": "exceptionId",
+ "description": "The id of revoked exception, as reported in `exceptionThrown`.",
+ "type": "integer"
+ }
+ ]
+ },
+ {
+ "name": "exceptionThrown",
+ "description": "Issued when exception was thrown and unhandled.",
+ "parameters": [
+ {
+ "name": "timestamp",
+ "description": "Timestamp of the exception.",
+ "$ref": "Timestamp"
+ },
+ {
+ "name": "exceptionDetails",
+ "$ref": "ExceptionDetails"
+ }
+ ]
+ },
+ {
+ "name": "executionContextCreated",
+ "description": "Issued when new execution context is created.",
+ "parameters": [
+ {
+ "name": "context",
+ "description": "A newly created execution context.",
+ "$ref": "ExecutionContextDescription"
+ }
+ ]
+ },
+ {
+ "name": "executionContextDestroyed",
+ "description": "Issued when execution context is destroyed.",
+ "parameters": [
+ {
+ "name": "executionContextId",
+ "description": "Id of the destroyed context",
+ "$ref": "ExecutionContextId"
+ }
+ ]
+ },
+ {
+ "name": "executionContextsCleared",
+ "description": "Issued when all executionContexts were cleared in browser"
+ },
+ {
+ "name": "inspectRequested",
+ "description": "Issued when object should be inspected (for example, as a result of inspect() command line API\ncall).",
+ "parameters": [
+ {
+ "name": "object",
+ "$ref": "RemoteObject"
+ },
+ {
+ "name": "hints",
+ "type": "object"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "domain": "Schema",
+ "description": "This domain is deprecated.",
+ "deprecated": true,
+ "types": [
+ {
+ "id": "Domain",
+ "description": "Description of the protocol domain.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "name",
+ "description": "Domain name.",
+ "type": "string"
+ },
+ {
+ "name": "version",
+ "description": "Domain version.",
+ "type": "string"
+ }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "getDomains",
+ "description": "Returns supported domains.",
+ "returns": [
+ {
+ "name": "domains",
+ "description": "List of supported domains.",
+ "type": "array",
+ "items": {
+ "$ref": "Domain"
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+} \ No newline at end of file
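
The fragment above closes out the Runtime domain in the JSON schema (the tail of `Runtime.runScript`, `Runtime.setCustomObjectFormatterEnabled`, and the Runtime events) plus the deprecated Schema domain. As a rough illustration of how these shapes are consumed by a client, the following TypeScript sketch mirrors the `Runtime.runScript` parameters and the `Runtime.consoleAPICalled` event payload as declared above; the `send` transport helper is an assumption, and only fields present in the schema are typed.

```ts
// Parameter shape for Runtime.runScript as declared in the schema above.
interface RunScriptParams {
  scriptId: string;                // Runtime.ScriptId
  executionContextId?: number;     // Runtime.ExecutionContextId
  objectGroup?: string;
  silent?: boolean;
  includeCommandLineAPI?: boolean;
  returnByValue?: boolean;
  generatePreview?: boolean;
  awaitPromise?: boolean;
}

// Payload of the Runtime.consoleAPICalled event.
interface ConsoleAPICalledEvent {
  type: "log" | "debug" | "info" | "error" | "warning" | "dir" | "dirxml" |
        "table" | "trace" | "clear" | "startGroup" | "startGroupCollapsed" |
        "endGroup" | "assert" | "profile" | "profileEnd" | "count" | "timeEnd";
  args: unknown[];                 // RemoteObject[]
  executionContextId: number;
  timestamp: number;               // Timestamp
  stackTrace?: unknown;            // StackTrace
  context?: string;                // experimental
}

// Hypothetical transport: CDP messages are JSON objects of the form {id, method, params}.
declare function send(method: string, params?: object): Promise<unknown>;

async function runCompiledScript(scriptId: string) {
  // Run a previously compiled script and wait for the awaited promise result.
  const params: RunScriptParams = { scriptId, awaitPromise: true, returnByValue: true };
  return send("Runtime.runScript", params);
}
```
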
diff --git a/deps/v8/src/inspector/js_protocol.pdl b/deps/v8/src/inspector/js_protocol.pdl
new file mode 100644
index 0000000000..5a23199e4a
--- /dev/null
+++ b/deps/v8/src/inspector/js_protocol.pdl
@@ -0,0 +1,1370 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+version
+ major 1
+ minor 3
+
+# This domain is deprecated - use Runtime or Log instead.
+deprecated domain Console
+ depends on Runtime
+
+ # Console message.
+ type ConsoleMessage extends object
+ properties
+ # Message source.
+ enum source
+ xml
+ javascript
+ network
+ console-api
+ storage
+ appcache
+ rendering
+ security
+ other
+ deprecation
+ worker
+ # Message severity.
+ enum level
+ log
+ warning
+ error
+ debug
+ info
+ # Message text.
+ string text
+ # URL of the message origin.
+ optional string url
+ # Line number in the resource that generated this message (1-based).
+ optional integer line
+ # Column number in the resource that generated this message (1-based).
+ optional integer column
+
+ # Does nothing.
+ command clearMessages
+
+ # Disables console domain, prevents further console messages from being reported to the client.
+ command disable
+
+ # Enables console domain, sends the messages collected so far to the client by means of the
+ # `messageAdded` notification.
+ command enable
+
+ # Issued when new console message is added.
+ event messageAdded
+ parameters
+ # Console message that has been added.
+ ConsoleMessage message
+
+# Debugger domain exposes JavaScript debugging capabilities. It allows setting and removing
+# breakpoints, stepping through execution, exploring stack traces, etc.
+domain Debugger
+ depends on Runtime
+
+ # Breakpoint identifier.
+ type BreakpointId extends string
+
+ # Call frame identifier.
+ type CallFrameId extends string
+
+ # Location in the source code.
+ type Location extends object
+ properties
+ # Script identifier as reported in the `Debugger.scriptParsed`.
+ Runtime.ScriptId scriptId
+ # Line number in the script (0-based).
+ integer lineNumber
+ # Column number in the script (0-based).
+ optional integer columnNumber
+
+ # Location in the source code.
+ experimental type ScriptPosition extends object
+ properties
+ integer lineNumber
+ integer columnNumber
+
+ # JavaScript call frame. Array of call frames form the call stack.
+ type CallFrame extends object
+ properties
+ # Call frame identifier. This identifier is only valid while the virtual machine is paused.
+ CallFrameId callFrameId
+ # Name of the JavaScript function called on this call frame.
+ string functionName
+ # Location in the source code.
+ optional Location functionLocation
+ # Location in the source code.
+ Location location
+ # JavaScript script name or url.
+ string url
+ # Scope chain for this call frame.
+ array of Scope scopeChain
+ # `this` object for this call frame.
+ Runtime.RemoteObject this
+ # The value being returned, if the function is at return point.
+ optional Runtime.RemoteObject returnValue
+
+ # Scope description.
+ type Scope extends object
+ properties
+ # Scope type.
+ enum type
+ global
+ local
+ with
+ closure
+ catch
+ block
+ script
+ eval
+ module
+ # Object representing the scope. For `global` and `with` scopes it represents the actual
+ # object; for the rest of the scopes, it is an artificial transient object enumerating scope
+ # variables as its properties.
+ Runtime.RemoteObject object
+ optional string name
+ # Location in the source code where scope starts
+ optional Location startLocation
+ # Location in the source code where scope ends
+ optional Location endLocation
+
+ # Search match for resource.
+ type SearchMatch extends object
+ properties
+ # Line number in resource content.
+ number lineNumber
+ # Line with match content.
+ string lineContent
+
+ type BreakLocation extends object
+ properties
+ # Script identifier as reported in the `Debugger.scriptParsed`.
+ Runtime.ScriptId scriptId
+ # Line number in the script (0-based).
+ integer lineNumber
+ # Column number in the script (0-based).
+ optional integer columnNumber
+ optional enum type
+ debuggerStatement
+ call
+ return
+
+ # Continues execution until specific location is reached.
+ command continueToLocation
+ parameters
+ # Location to continue to.
+ Location location
+ optional enum targetCallFrames
+ any
+ current
+
+ # Disables debugger for given page.
+ command disable
+
+ # Enables debugger for the given page. Clients should not assume that the debugging has been
+ # enabled until the result for this command is received.
+ command enable
+ returns
+ # Unique identifier of the debugger.
+ experimental Runtime.UniqueDebuggerId debuggerId
+
+ # Evaluates expression on a given call frame.
+ command evaluateOnCallFrame
+ parameters
+ # Call frame identifier to evaluate on.
+ CallFrameId callFrameId
+ # Expression to evaluate.
+ string expression
+ # String object group name to put result into (allows rapid releasing resulting object handles
+ # using `releaseObjectGroup`).
+ optional string objectGroup
+ # Specifies whether command line API should be available to the evaluated expression, defaults
+ # to false.
+ optional boolean includeCommandLineAPI
+ # In silent mode exceptions thrown during evaluation are not reported and do not pause
+ # execution. Overrides `setPauseOnException` state.
+ optional boolean silent
+ # Whether the result is expected to be a JSON object that should be sent by value.
+ optional boolean returnByValue
+ # Whether preview should be generated for the result.
+ experimental optional boolean generatePreview
+ # Whether to throw an exception if side effect cannot be ruled out during evaluation.
+ optional boolean throwOnSideEffect
+ returns
+ # Object wrapper for the evaluation result.
+ Runtime.RemoteObject result
+ # Exception details.
+ optional Runtime.ExceptionDetails exceptionDetails
+
+ # Returns possible locations for breakpoint. scriptId in start and end range locations should be
+ # the same.
+ command getPossibleBreakpoints
+ parameters
+ # Start of range to search possible breakpoint locations in.
+ Location start
+ # End of range to search possible breakpoint locations in (exclusive). When not specified, end
+ # of script is used as end of range.
+ optional Location end
+ # Only consider locations which are in the same (non-nested) function as start.
+ optional boolean restrictToFunction
+ returns
+ # List of the possible breakpoint locations.
+ array of BreakLocation locations
+
+ # Returns source for the script with given id.
+ command getScriptSource
+ parameters
+ # Id of the script to get source for.
+ Runtime.ScriptId scriptId
+ returns
+ # Script source.
+ string scriptSource
+
+ # Returns stack trace with given `stackTraceId`.
+ experimental command getStackTrace
+ parameters
+ Runtime.StackTraceId stackTraceId
+ returns
+ Runtime.StackTrace stackTrace
+
+ # Stops on the next JavaScript statement.
+ command pause
+
+ experimental command pauseOnAsyncCall
+ parameters
+ # Debugger will pause when async call with given stack trace is started.
+ Runtime.StackTraceId parentStackTraceId
+
+ # Removes JavaScript breakpoint.
+ command removeBreakpoint
+ parameters
+ BreakpointId breakpointId
+
+ # Restarts particular call frame from the beginning.
+ command restartFrame
+ parameters
+ # Call frame identifier to evaluate on.
+ CallFrameId callFrameId
+ returns
+ # New stack trace.
+ array of CallFrame callFrames
+ # Async stack trace, if any.
+ optional Runtime.StackTrace asyncStackTrace
+ # Async stack trace, if any.
+ experimental optional Runtime.StackTraceId asyncStackTraceId
+
+ # Resumes JavaScript execution.
+ command resume
+
+ # This method is deprecated - use Debugger.stepInto with breakOnAsyncCall and
+ # Debugger.pauseOnAsyncCall instead. Steps into the next scheduled async task if any is scheduled
+ # before the next pause. Returns success when an async task is actually scheduled, returns an
+ # error if no task was scheduled or another scheduleStepIntoAsync was called.
+ experimental command scheduleStepIntoAsync
+
+ # Searches for given string in script content.
+ command searchInContent
+ parameters
+ # Id of the script to search in.
+ Runtime.ScriptId scriptId
+ # String to search for.
+ string query
+ # If true, search is case sensitive.
+ optional boolean caseSensitive
+ # If true, treats string parameter as regex.
+ optional boolean isRegex
+ returns
+ # List of search matches.
+ array of SearchMatch result
+
+ # Enables or disables async call stacks tracking.
+ command setAsyncCallStackDepth
+ parameters
+ # Maximum depth of async call stacks. Setting to `0` will effectively disable collecting async
+ # call stacks (default).
+ integer maxDepth
+
+ # Replace previous blackbox patterns with passed ones. Forces backend to skip stepping/pausing in
+ # scripts with url matching one of the patterns. VM will try to leave blackboxed script by
+ # performing 'step in' several times, finally resorting to 'step out' if unsuccessful.
+ experimental command setBlackboxPatterns
+ parameters
+ # Array of regexps that will be used to check script url for blackbox state.
+ array of string patterns
+
+ # Makes backend skip steps in the script in blackboxed ranges. VM will try to leave blackboxed
+ # scripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful.
+ # Positions array contains positions where blackbox state is changed. First interval isn't
+ # blackboxed. Array should be sorted.
+ experimental command setBlackboxedRanges
+ parameters
+ # Id of the script.
+ Runtime.ScriptId scriptId
+ array of ScriptPosition positions
+
+ # Sets JavaScript breakpoint at a given location.
+ command setBreakpoint
+ parameters
+ # Location to set breakpoint in.
+ Location location
+ # Expression to use as a breakpoint condition. When specified, debugger will only stop on the
+ # breakpoint if this expression evaluates to true.
+ optional string condition
+ returns
+ # Id of the created breakpoint for further reference.
+ BreakpointId breakpointId
+ # Location this breakpoint resolved into.
+ Location actualLocation
+
+ # Sets JavaScript breakpoint at given location specified either by URL or URL regex. Once this
+ # command is issued, all existing parsed scripts will have breakpoints resolved and returned in
+ # `locations` property. Further matching script parsing will result in subsequent
+ # `breakpointResolved` events issued. This logical breakpoint will survive page reloads.
+ command setBreakpointByUrl
+ parameters
+ # Line number to set breakpoint at.
+ integer lineNumber
+ # URL of the resources to set breakpoint on.
+ optional string url
+ # Regex pattern for the URLs of the resources to set breakpoints on. Either `url` or
+ # `urlRegex` must be specified.
+ optional string urlRegex
+ # Script hash of the resources to set breakpoint on.
+ optional string scriptHash
+ # Offset in the line to set breakpoint at.
+ optional integer columnNumber
+ # Expression to use as a breakpoint condition. When specified, debugger will only stop on the
+ # breakpoint if this expression evaluates to true.
+ optional string condition
+ returns
+ # Id of the created breakpoint for further reference.
+ BreakpointId breakpointId
+ # List of the locations this breakpoint resolved into upon addition.
+ array of Location locations
+
+ # Activates / deactivates all breakpoints on the page.
+ command setBreakpointsActive
+ parameters
+ # New value for breakpoints active state.
+ boolean active
+
+ # Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions or
+ # no exceptions. Initial pause on exceptions state is `none`.
+ command setPauseOnExceptions
+ parameters
+ # Pause on exceptions mode.
+ enum state
+ none
+ uncaught
+ all
+
+ # Changes return value in top frame. Available only at return break position.
+ experimental command setReturnValue
+ parameters
+ # New return value.
+ Runtime.CallArgument newValue
+
+ # Edits JavaScript source live.
+ command setScriptSource
+ parameters
+ # Id of the script to edit.
+ Runtime.ScriptId scriptId
+ # New content of the script.
+ string scriptSource
+ # If true the change will not actually be applied. Dry run may be used to get result
+ # description without actually modifying the code.
+ optional boolean dryRun
+ returns
+ # New stack trace in case editing has happened while VM was stopped.
+ optional array of CallFrame callFrames
+ # Whether current call stack was modified after applying the changes.
+ optional boolean stackChanged
+ # Async stack trace, if any.
+ optional Runtime.StackTrace asyncStackTrace
+ # Async stack trace, if any.
+ experimental optional Runtime.StackTraceId asyncStackTraceId
+ # Exception details if any.
+ optional Runtime.ExceptionDetails exceptionDetails
+
+ # Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc).
+ command setSkipAllPauses
+ parameters
+ # New value for skip pauses state.
+ boolean skip
+
+ # Changes value of variable in a callframe. Object-based scopes are not supported and must be
+ # mutated manually.
+ command setVariableValue
+ parameters
+ # 0-based number of scope as was listed in scope chain. Only 'local', 'closure' and 'catch'
+ # scope types are allowed. Other scopes could be manipulated manually.
+ integer scopeNumber
+ # Variable name.
+ string variableName
+ # New variable value.
+ Runtime.CallArgument newValue
+ # Id of callframe that holds variable.
+ CallFrameId callFrameId
+
+ # Steps into the function call.
+ command stepInto
+ parameters
+ # Debugger will issue additional Debugger.paused notification if any async task is scheduled
+ # before next pause.
+ experimental optional boolean breakOnAsyncCall
+
+ # Steps out of the function call.
+ command stepOut
+
+ # Steps over the statement.
+ command stepOver
+
+ # Fired when breakpoint is resolved to an actual script and location.
+ event breakpointResolved
+ parameters
+ # Breakpoint unique identifier.
+ BreakpointId breakpointId
+ # Actual breakpoint location.
+ Location location
+
+ # Fired when the virtual machine stopped on breakpoint or exception or any other stop criteria.
+ event paused
+ parameters
+ # Call stack the virtual machine stopped on.
+ array of CallFrame callFrames
+ # Pause reason.
+ enum reason
+ XHR
+ DOM
+ EventListener
+ exception
+ assert
+ debugCommand
+ promiseRejection
+ OOM
+ other
+ ambiguous
+ # Object containing break-specific auxiliary properties.
+ optional object data
+ # Hit breakpoints IDs
+ optional array of string hitBreakpoints
+ # Async stack trace, if any.
+ optional Runtime.StackTrace asyncStackTrace
+ # Async stack trace, if any.
+ experimental optional Runtime.StackTraceId asyncStackTraceId
+ # Just scheduled async call will have this stack trace as parent stack during async execution.
+ # This field is available only after `Debugger.stepInto` call with `breakOnAsyncCall` flag.
+ experimental optional Runtime.StackTraceId asyncCallStackTraceId
+
+ # Fired when the virtual machine resumed execution.
+ event resumed
+
+ # Fired when virtual machine fails to parse the script.
+ event scriptFailedToParse
+ parameters
+ # Identifier of the script parsed.
+ Runtime.ScriptId scriptId
+ # URL or name of the script parsed (if any).
+ string url
+ # Line offset of the script within the resource with given URL (for script tags).
+ integer startLine
+ # Column offset of the script within the resource with given URL.
+ integer startColumn
+ # Last line of the script.
+ integer endLine
+ # Length of the last line of the script.
+ integer endColumn
+ # Specifies script creation context.
+ Runtime.ExecutionContextId executionContextId
+ # Content hash of the script.
+ string hash
+ # Embedder-specific auxiliary data.
+ optional object executionContextAuxData
+ # URL of source map associated with script (if any).
+ optional string sourceMapURL
+ # True, if this script has sourceURL.
+ optional boolean hasSourceURL
+ # True, if this script is ES6 module.
+ optional boolean isModule
+ # This script length.
+ optional integer length
+ # JavaScript top stack frame of where the script parsed event was triggered if available.
+ experimental optional Runtime.StackTrace stackTrace
+
+ # Fired when virtual machine parses script. This event is also fired for all known and uncollected
+ # scripts upon enabling debugger.
+ event scriptParsed
+ parameters
+ # Identifier of the script parsed.
+ Runtime.ScriptId scriptId
+ # URL or name of the script parsed (if any).
+ string url
+ # Line offset of the script within the resource with given URL (for script tags).
+ integer startLine
+ # Column offset of the script within the resource with given URL.
+ integer startColumn
+ # Last line of the script.
+ integer endLine
+ # Length of the last line of the script.
+ integer endColumn
+ # Specifies script creation context.
+ Runtime.ExecutionContextId executionContextId
+ # Content hash of the script.
+ string hash
+ # Embedder-specific auxiliary data.
+ optional object executionContextAuxData
+ # True, if this script is generated as a result of the live edit operation.
+ experimental optional boolean isLiveEdit
+ # URL of source map associated with script (if any).
+ optional string sourceMapURL
+ # True, if this script has sourceURL.
+ optional boolean hasSourceURL
+ # True, if this script is ES6 module.
+ optional boolean isModule
+ # This script length.
+ optional integer length
+ # JavaScript top stack frame of where the script parsed event was triggered if available.
+ experimental optional Runtime.StackTrace stackTrace
+
+experimental domain HeapProfiler
+ depends on Runtime
+
+ # Heap snapshot object id.
+ type HeapSnapshotObjectId extends string
+
+ # Sampling Heap Profile node. Holds callsite information, allocation statistics and child nodes.
+ type SamplingHeapProfileNode extends object
+ properties
+ # Function location.
+ Runtime.CallFrame callFrame
+ # Allocations size in bytes for the node excluding children.
+ number selfSize
+ # Child nodes.
+ array of SamplingHeapProfileNode children
+
+ # Profile.
+ type SamplingHeapProfile extends object
+ properties
+ SamplingHeapProfileNode head
+
+ # Enables console to refer to the node with given id via $x (see Command Line API for more details
+ # on $x functions).
+ command addInspectedHeapObject
+ parameters
+ # Heap snapshot object id to be accessible by means of $x command line API.
+ HeapSnapshotObjectId heapObjectId
+
+ command collectGarbage
+
+ command disable
+
+ command enable
+
+ command getHeapObjectId
+ parameters
+ # Identifier of the object to get heap object id for.
+ Runtime.RemoteObjectId objectId
+ returns
+ # Id of the heap snapshot object corresponding to the passed remote object id.
+ HeapSnapshotObjectId heapSnapshotObjectId
+
+ command getObjectByHeapObjectId
+ parameters
+ HeapSnapshotObjectId objectId
+ # Symbolic group name that can be used to release multiple objects.
+ optional string objectGroup
+ returns
+ # Evaluation result.
+ Runtime.RemoteObject result
+
+ command getSamplingProfile
+ returns
+ # Return the sampling profile being collected.
+ SamplingHeapProfile profile
+
+ command startSampling
+ parameters
+ # Average sample interval in bytes. Poisson distribution is used for the intervals. The
+ # default value is 32768 bytes.
+ optional number samplingInterval
+
+ command startTrackingHeapObjects
+ parameters
+ optional boolean trackAllocations
+
+ command stopSampling
+ returns
+ # Recorded sampling heap profile.
+ SamplingHeapProfile profile
+
+ command stopTrackingHeapObjects
+ parameters
+ # If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken
+ # when the tracking is stopped.
+ optional boolean reportProgress
+
+ command takeHeapSnapshot
+ parameters
+ # If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken.
+ optional boolean reportProgress
+
+ event addHeapSnapshotChunk
+ parameters
+ string chunk
+
+ # If heap objects tracking has been started then backend may send update for one or more fragments
+ event heapStatsUpdate
+ parameters
+ # An array of triplets. Each triplet describes a fragment. The first integer is the fragment
+ # index, the second integer is a total count of objects for the fragment, the third integer is
+ # a total size of the objects for the fragment.
+ array of integer statsUpdate
+
+ # If heap objects tracking has been started then backend regularly sends a current value for last
+ # seen object id and corresponding timestamp. If there were changes in the heap since the last event
+ # then one or more heapStatsUpdate events will be sent before a new lastSeenObjectId event.
+ event lastSeenObjectId
+ parameters
+ integer lastSeenObjectId
+ number timestamp
+
+ event reportHeapSnapshotProgress
+ parameters
+ integer done
+ integer total
+ optional boolean finished
+
+ event resetProfiles
+
+domain Profiler
+ depends on Runtime
+ depends on Debugger
+
+ # Profile node. Holds callsite information, execution statistics and child nodes.
+ type ProfileNode extends object
+ properties
+ # Unique id of the node.
+ integer id
+ # Function location.
+ Runtime.CallFrame callFrame
+ # Number of samples where this node was on top of the call stack.
+ optional integer hitCount
+ # Child node ids.
+ optional array of integer children
+ # The reason why the function is not optimized. The function may be deoptimized or marked as
+ # don't optimize.
+ optional string deoptReason
+ # An array of source position ticks.
+ optional array of PositionTickInfo positionTicks
+
+ # Profile.
+ type Profile extends object
+ properties
+ # The list of profile nodes. First item is the root node.
+ array of ProfileNode nodes
+ # Profiling start timestamp in microseconds.
+ number startTime
+ # Profiling end timestamp in microseconds.
+ number endTime
+ # Ids of samples top nodes.
+ optional array of integer samples
+ # Time intervals between adjacent samples in microseconds. The first delta is relative to the
+ # profile startTime.
+ optional array of integer timeDeltas
+
+ # Specifies a number of samples attributed to a certain source position.
+ type PositionTickInfo extends object
+ properties
+ # Source line number (1-based).
+ integer line
+ # Number of samples attributed to the source line.
+ integer ticks
+
+ # Coverage data for a source range.
+ type CoverageRange extends object
+ properties
+ # JavaScript script source offset for the range start.
+ integer startOffset
+ # JavaScript script source offset for the range end.
+ integer endOffset
+ # Collected execution count of the source range.
+ integer count
+
+ # Coverage data for a JavaScript function.
+ type FunctionCoverage extends object
+ properties
+ # JavaScript function name.
+ string functionName
+ # Source ranges inside the function with coverage data.
+ array of CoverageRange ranges
+ # Whether coverage data for this function has block granularity.
+ boolean isBlockCoverage
+
+ # Coverage data for a JavaScript script.
+ type ScriptCoverage extends object
+ properties
+ # JavaScript script id.
+ Runtime.ScriptId scriptId
+ # JavaScript script name or url.
+ string url
+ # Functions contained in the script that have coverage data.
+ array of FunctionCoverage functions
+
+ # Describes a type collected during runtime.
+ experimental type TypeObject extends object
+ properties
+ # Name of a type collected with type profiling.
+ string name
+
+ # Source offset and types for a parameter or return value.
+ experimental type TypeProfileEntry extends object
+ properties
+ # Source offset of the parameter or end of function for return values.
+ integer offset
+ # The types for this parameter or return value.
+ array of TypeObject types
+
+ # Type profile data collected during runtime for a JavaScript script.
+ experimental type ScriptTypeProfile extends object
+ properties
+ # JavaScript script id.
+ Runtime.ScriptId scriptId
+ # JavaScript script name or url.
+ string url
+ # Type profile entries for parameters and return values of the functions in the script.
+ array of TypeProfileEntry entries
+
+ command disable
+
+ command enable
+
+ # Collect coverage data for the current isolate. The coverage data may be incomplete due to
+ # garbage collection.
+ command getBestEffortCoverage
+ returns
+ # Coverage data for the current isolate.
+ array of ScriptCoverage result
+
+ # Changes CPU profiler sampling interval. Must be called before CPU profiles recording started.
+ command setSamplingInterval
+ parameters
+ # New sampling interval in microseconds.
+ integer interval
+
+ command start
+
+ # Enable precise code coverage. Coverage data for JavaScript executed before enabling precise code
+ # coverage may be incomplete. Enabling prevents running optimized code and resets execution
+ # counters.
+ command startPreciseCoverage
+ parameters
+ # Collect accurate call counts beyond simple 'covered' or 'not covered'.
+ optional boolean callCount
+ # Collect block-based coverage.
+ optional boolean detailed
+
+ # Enable type profile.
+ experimental command startTypeProfile
+
+ command stop
+ returns
+ # Recorded profile.
+ Profile profile
+
+ # Disable precise code coverage. Disabling releases unnecessary execution count records and allows
+ # executing optimized code.
+ command stopPreciseCoverage
+
+ # Disable type profile. Disabling releases type profile data collected so far.
+ experimental command stopTypeProfile
+
+ # Collect coverage data for the current isolate, and resets execution counters. Precise code
+ # coverage needs to have started.
+ command takePreciseCoverage
+ returns
+ # Coverage data for the current isolate.
+ array of ScriptCoverage result
+
+ # Collect type profile.
+ experimental command takeTypeProfile
+ returns
+ # Type profile for all scripts since startTypeProfile() was turned on.
+ array of ScriptTypeProfile result
+
+ event consoleProfileFinished
+ parameters
+ string id
+ # Location of console.profileEnd().
+ Debugger.Location location
+ Profile profile
+ # Profile title passed as an argument to console.profile().
+ optional string title
+
+ # Sent when new profile recording is started using console.profile() call.
+ event consoleProfileStarted
+ parameters
+ string id
+ # Location of console.profile().
+ Debugger.Location location
+ # Profile title passed as an argument to console.profile().
+ optional string title
+
+# Runtime domain exposes JavaScript runtime by means of remote evaluation and mirror objects.
+# Evaluation results are returned as mirror objects that expose object type, string representation
+# and unique identifier that can be used for further object reference. Original objects are
+# maintained in memory unless they are either explicitly released or are released along with the
+# other objects in their object group.
+domain Runtime
+
+ # Unique script identifier.
+ type ScriptId extends string
+
+ # Unique object identifier.
+ type RemoteObjectId extends string
+
+ # Primitive value which cannot be JSON-stringified.
+ type UnserializableValue extends string
+ enum
+ Infinity
+ NaN
+ -Infinity
+ -0
+
+ # Mirror object referencing original JavaScript object.
+ type RemoteObject extends object
+ properties
+ # Object type.
+ enum type
+ object
+ function
+ undefined
+ string
+ number
+ boolean
+ symbol
+ # Object subtype hint. Specified for `object` type values only.
+ optional enum subtype
+ array
+ null
+ node
+ regexp
+ date
+ map
+ set
+ weakmap
+ weakset
+ iterator
+ generator
+ error
+ proxy
+ promise
+ typedarray
+ # Object class (constructor) name. Specified for `object` type values only.
+ optional string className
+ # Remote object value in case of primitive values or JSON values (if it was requested).
+ optional any value
+ # Primitive value which can not be JSON-stringified does not have `value`, but gets this
+ # property.
+ optional UnserializableValue unserializableValue
+ # String representation of the object.
+ optional string description
+ # Unique object identifier (for non-primitive values).
+ optional RemoteObjectId objectId
+ # Preview containing abbreviated property values. Specified for `object` type values only.
+ experimental optional ObjectPreview preview
+ experimental optional CustomPreview customPreview
+
+ experimental type CustomPreview extends object
+ properties
+ string header
+ boolean hasBody
+ RemoteObjectId formatterObjectId
+ RemoteObjectId bindRemoteObjectFunctionId
+ optional RemoteObjectId configObjectId
+
+ # Object containing abbreviated remote object value.
+ experimental type ObjectPreview extends object
+ properties
+ # Object type.
+ enum type
+ object
+ function
+ undefined
+ string
+ number
+ boolean
+ symbol
+ # Object subtype hint. Specified for `object` type values only.
+ optional enum subtype
+ array
+ null
+ node
+ regexp
+ date
+ map
+ set
+ weakmap
+ weakset
+ iterator
+ generator
+ error
+ # String representation of the object.
+ optional string description
+ # True iff some of the properties or entries of the original object did not fit.
+ boolean overflow
+ # List of the properties.
+ array of PropertyPreview properties
+ # List of the entries. Specified for `map` and `set` subtype values only.
+ optional array of EntryPreview entries
+
+ experimental type PropertyPreview extends object
+ properties
+ # Property name.
+ string name
+ # Object type. Accessor means that the property itself is an accessor property.
+ enum type
+ object
+ function
+ undefined
+ string
+ number
+ boolean
+ symbol
+ accessor
+ # User-friendly property value string.
+ optional string value
+ # Nested value preview.
+ optional ObjectPreview valuePreview
+ # Object subtype hint. Specified for `object` type values only.
+ optional enum subtype
+ array
+ null
+ node
+ regexp
+ date
+ map
+ set
+ weakmap
+ weakset
+ iterator
+ generator
+ error
+
+ experimental type EntryPreview extends object
+ properties
+ # Preview of the key. Specified for map-like collection entries.
+ optional ObjectPreview key
+ # Preview of the value.
+ ObjectPreview value
+
+ # Object property descriptor.
+ type PropertyDescriptor extends object
+ properties
+ # Property name or symbol description.
+ string name
+ # The value associated with the property.
+ optional RemoteObject value
+ # True if the value associated with the property may be changed (data descriptors only).
+ optional boolean writable
+ # A function which serves as a getter for the property, or `undefined` if there is no getter
+ # (accessor descriptors only).
+ optional RemoteObject get
+ # A function which serves as a setter for the property, or `undefined` if there is no setter
+ # (accessor descriptors only).
+ optional RemoteObject set
+ # True if the type of this property descriptor may be changed and if the property may be
+ # deleted from the corresponding object.
+ boolean configurable
+ # True if this property shows up during enumeration of the properties on the corresponding
+ # object.
+ boolean enumerable
+ # True if the result was thrown during the evaluation.
+ optional boolean wasThrown
+ # True if the property is owned by the object.
+ optional boolean isOwn
+ # Property symbol object, if the property is of the `symbol` type.
+ optional RemoteObject symbol
+
+ # Object internal property descriptor. This property isn't normally visible in JavaScript code.
+ type InternalPropertyDescriptor extends object
+ properties
+ # Conventional property name.
+ string name
+ # The value associated with the property.
+ optional RemoteObject value
+
+ # Represents function call argument. Either remote object id `objectId`, primitive `value`,
+ # unserializable primitive value, or neither of them (for undefined) should be specified.
+ type CallArgument extends object
+ properties
+ # Primitive value or serializable javascript object.
+ optional any value
+ # Primitive value which can not be JSON-stringified.
+ optional UnserializableValue unserializableValue
+ # Remote object handle.
+ optional RemoteObjectId objectId
+
+ # Id of an execution context.
+ type ExecutionContextId extends integer
+
+ # Description of an isolated world.
+ type ExecutionContextDescription extends object
+ properties
+ # Unique id of the execution context. It can be used to specify in which execution context
+ # script evaluation should be performed.
+ ExecutionContextId id
+ # Execution context origin.
+ string origin
+ # Human readable name describing given context.
+ string name
+ # Embedder-specific auxiliary data.
+ optional object auxData
+
+ # Detailed information about exception (or error) that was thrown during script compilation or
+ # execution.
+ type ExceptionDetails extends object
+ properties
+ # Exception id.
+ integer exceptionId
+ # Exception text, which should be used together with exception object when available.
+ string text
+ # Line number of the exception location (0-based).
+ integer lineNumber
+ # Column number of the exception location (0-based).
+ integer columnNumber
+ # Script ID of the exception location.
+ optional ScriptId scriptId
+ # URL of the exception location, to be used when the script was not reported.
+ optional string url
+ # JavaScript stack trace if available.
+ optional StackTrace stackTrace
+ # Exception object if available.
+ optional RemoteObject exception
+ # Identifier of the context where exception happened.
+ optional ExecutionContextId executionContextId
+
+ # Number of milliseconds since epoch.
+ type Timestamp extends number
+
+ # Stack entry for runtime errors and assertions.
+ type CallFrame extends object
+ properties
+ # JavaScript function name.
+ string functionName
+ # JavaScript script id.
+ ScriptId scriptId
+ # JavaScript script name or url.
+ string url
+ # JavaScript script line number (0-based).
+ integer lineNumber
+ # JavaScript script column number (0-based).
+ integer columnNumber
+
+ # Call frames for assertions or error messages.
+ type StackTrace extends object
+ properties
+ # String label of this stack trace. For async traces this may be a name of the function that
+ # initiated the async call.
+ optional string description
+ # JavaScript call frames of the stack trace.
+ array of CallFrame callFrames
+ # Asynchronous JavaScript stack trace that preceded this stack, if available.
+ optional StackTrace parent
+ # Asynchronous JavaScript stack trace that preceded this stack, if available.
+ experimental optional StackTraceId parentId
+
+ # Unique identifier of current debugger.
+ experimental type UniqueDebuggerId extends string
+
+ # If `debuggerId` is set, the stack trace comes from another debugger and can be resolved there.
+ # This allows tracking cross-debugger calls. See `Runtime.StackTrace` and `Debugger.paused` for usages.
+ experimental type StackTraceId extends object
+ properties
+ string id
+ optional UniqueDebuggerId debuggerId
+
+ # Add handler to promise with given promise object id.
+ command awaitPromise
+ parameters
+ # Identifier of the promise.
+ RemoteObjectId promiseObjectId
+ # Whether the result is expected to be a JSON object that should be sent by value.
+ optional boolean returnByValue
+ # Whether preview should be generated for the result.
+ optional boolean generatePreview
+ returns
+ # Promise result. Will contain rejected value if promise was rejected.
+ RemoteObject result
+ # Exception details if stack trace is available.
+ optional ExceptionDetails exceptionDetails
+
+ # Calls function with given declaration on the given object. Object group of the result is
+ # inherited from the target object.
+ command callFunctionOn
+ parameters
+ # Declaration of the function to call.
+ string functionDeclaration
+ # Identifier of the object to call function on. Either objectId or executionContextId should
+ # be specified.
+ optional RemoteObjectId objectId
+ # Call arguments. All call arguments must belong to the same JavaScript world as the target
+ # object.
+ optional array of CallArgument arguments
+ # In silent mode exceptions thrown during evaluation are not reported and do not pause
+ # execution. Overrides `setPauseOnException` state.
+ optional boolean silent
+ # Whether the result is expected to be a JSON object which should be sent by value.
+ optional boolean returnByValue
+ # Whether preview should be generated for the result.
+ experimental optional boolean generatePreview
+ # Whether execution should be treated as initiated by user in the UI.
+ optional boolean userGesture
+ # Whether execution should `await` for resulting value and return once awaited promise is
+ # resolved.
+ optional boolean awaitPromise
+ # Specifies execution context whose global object will be used to call function on. Either
+ # executionContextId or objectId should be specified.
+ optional ExecutionContextId executionContextId
+ # Symbolic group name that can be used to release multiple objects. If objectGroup is not
+ # specified and objectId is, objectGroup will be inherited from object.
+ optional string objectGroup
+ returns
+ # Call result.
+ RemoteObject result
+ # Exception details.
+ optional ExceptionDetails exceptionDetails
+
+ # Compiles expression.
+ command compileScript
+ parameters
+ # Expression to compile.
+ string expression
+ # Source url to be set for the script.
+ string sourceURL
+ # Specifies whether the compiled script should be persisted.
+ boolean persistScript
+ # Specifies in which execution context to perform script run. If the parameter is omitted the
+ # evaluation will be performed in the context of the inspected page.
+ optional ExecutionContextId executionContextId
+ returns
+ # Id of the script.
+ optional ScriptId scriptId
+ # Exception details.
+ optional ExceptionDetails exceptionDetails
+
+ # Disables reporting of execution contexts creation.
+ command disable
+
+ # Discards collected exceptions and console API calls.
+ command discardConsoleEntries
+
+ # Enables reporting of execution contexts creation by means of `executionContextCreated` event.
+ # When the reporting gets enabled the event will be sent immediately for each existing execution
+ # context.
+ command enable
+
+ # Evaluates expression on global object.
+ command evaluate
+ parameters
+ # Expression to evaluate.
+ string expression
+ # Symbolic group name that can be used to release multiple objects.
+ optional string objectGroup
+ # Determines whether Command Line API should be available during the evaluation.
+ optional boolean includeCommandLineAPI
+ # In silent mode exceptions thrown during evaluation are not reported and do not pause
+ # execution. Overrides `setPauseOnException` state.
+ optional boolean silent
+ # Specifies in which execution context to perform evaluation. If the parameter is omitted the
+ # evaluation will be performed in the context of the inspected page.
+ optional ExecutionContextId contextId
+ # Whether the result is expected to be a JSON object that should be sent by value.
+ optional boolean returnByValue
+ # Whether preview should be generated for the result.
+ experimental optional boolean generatePreview
+ # Whether execution should be treated as initiated by user in the UI.
+ optional boolean userGesture
+ # Whether execution should `await` for resulting value and return once awaited promise is
+ # resolved.
+ optional boolean awaitPromise
+ returns
+ # Evaluation result.
+ RemoteObject result
+ # Exception details.
+ optional ExceptionDetails exceptionDetails
+
+ # Returns properties of a given object. Object group of the result is inherited from the target
+ # object.
+ command getProperties
+ parameters
+ # Identifier of the object to return properties for.
+ RemoteObjectId objectId
+ # If true, returns properties belonging only to the element itself, not to its prototype
+ # chain.
+ optional boolean ownProperties
+ # If true, returns accessor properties (with getter/setter) only; internal properties are not
+ # returned either.
+ experimental optional boolean accessorPropertiesOnly
+ # Whether preview should be generated for the results.
+ experimental optional boolean generatePreview
+ returns
+ # Object properties.
+ array of PropertyDescriptor result
+ # Internal object properties (only of the element itself).
+ optional array of InternalPropertyDescriptor internalProperties
+ # Exception details.
+ optional ExceptionDetails exceptionDetails
+
+ # Returns all let, const and class variables from global scope.
+ command globalLexicalScopeNames
+ parameters
+ # Specifies in which execution context to lookup global scope variables.
+ optional ExecutionContextId executionContextId
+ returns
+ array of string names
+
+ command queryObjects
+ parameters
+ # Identifier of the prototype to return objects for.
+ RemoteObjectId prototypeObjectId
+ returns
+ # Array with objects.
+ RemoteObject objects
+
+ # Releases remote object with given id.
+ command releaseObject
+ parameters
+ # Identifier of the object to release.
+ RemoteObjectId objectId
+
+ # Releases all remote objects that belong to a given group.
+ command releaseObjectGroup
+ parameters
+ # Symbolic object group name.
+ string objectGroup
+
+ # Tells inspected instance to run if it was waiting for debugger to attach.
+ command runIfWaitingForDebugger
+
+ # Runs script with given id in a given context.
+ command runScript
+ parameters
+ # Id of the script to run.
+ ScriptId scriptId
+ # Specifies in which execution context to perform script run. If the parameter is omitted the
+ # evaluation will be performed in the context of the inspected page.
+ optional ExecutionContextId executionContextId
+ # Symbolic group name that can be used to release multiple objects.
+ optional string objectGroup
+ # In silent mode exceptions thrown during evaluation are not reported and do not pause
+ # execution. Overrides `setPauseOnException` state.
+ optional boolean silent
+ # Determines whether Command Line API should be available during the evaluation.
+ optional boolean includeCommandLineAPI
+ # Whether the result is expected to be a JSON object which should be sent by value.
+ optional boolean returnByValue
+ # Whether preview should be generated for the result.
+ optional boolean generatePreview
+ # Whether execution should `await` for resulting value and return once awaited promise is
+ # resolved.
+ optional boolean awaitPromise
+ returns
+ # Run result.
+ RemoteObject result
+ # Exception details.
+ optional ExceptionDetails exceptionDetails
+
+ experimental command setCustomObjectFormatterEnabled
+ parameters
+ boolean enabled
+
+ # Issued when console API was called.
+ event consoleAPICalled
+ parameters
+ # Type of the call.
+ enum type
+ log
+ debug
+ info
+ error
+ warning
+ dir
+ dirxml
+ table
+ trace
+ clear
+ startGroup
+ startGroupCollapsed
+ endGroup
+ assert
+ profile
+ profileEnd
+ count
+ timeEnd
+ # Call arguments.
+ array of RemoteObject args
+ # Identifier of the context where the call was made.
+ ExecutionContextId executionContextId
+ # Call timestamp.
+ Timestamp timestamp
+ # Stack trace captured when the call was made.
+ optional StackTrace stackTrace
+ # Console context descriptor for calls on non-default console context (not console.*):
+ # 'anonymous#unique-logger-id' for call on unnamed context, 'name#unique-logger-id' for call
+ # on named context.
+ experimental optional string context
+
+ # Issued when unhandled exception was revoked.
+ event exceptionRevoked
+ parameters
+ # Reason describing why exception was revoked.
+ string reason
+ # The id of revoked exception, as reported in `exceptionThrown`.
+ integer exceptionId
+
+ # Issued when exception was thrown and unhandled.
+ event exceptionThrown
+ parameters
+ # Timestamp of the exception.
+ Timestamp timestamp
+ ExceptionDetails exceptionDetails
+
+ # Issued when new execution context is created.
+ event executionContextCreated
+ parameters
+ # A newly created execution context.
+ ExecutionContextDescription context
+
+ # Issued when execution context is destroyed.
+ event executionContextDestroyed
+ parameters
+ # Id of the destroyed context
+ ExecutionContextId executionContextId
+
+ # Issued when all executionContexts were cleared in browser
+ event executionContextsCleared
+
+ # Issued when object should be inspected (for example, as a result of inspect() command line API
+ # call).
+ event inspectRequested
+ parameters
+ RemoteObject object
+ object hints
+
+# This domain is deprecated.
+deprecated domain Schema
+
+ # Description of the protocol domain.
+ type Domain extends object
+ properties
+ # Domain name.
+ string name
+ # Domain version.
+ string version
+
+ # Returns supported domains.
+ command getDomains
+ returns
+ # List of supported domains.
+ array of Domain domains
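
The `.pdl` file above expresses the same protocol as the JSON schema, just in the terser PDL syntax. As a hedged sketch of how a client might drive the Debugger domain defined here, the snippet below builds CDP wire messages (`{id, method, params}` objects) for `Debugger.enable` and `Debugger.setBreakpointByUrl`; the target URL, line number, and condition are illustrative placeholders, and the WebSocket transport itself is omitted.

```ts
// Wire format of a CDP request: {id, method, params}; the response echoes the id.
interface CdpRequest { id: number; method: string; params?: object; }

// Result shape of Debugger.setBreakpointByUrl as declared in the PDL above.
interface SetBreakpointByUrlResult {
  breakpointId: string;                                               // BreakpointId
  locations: { scriptId: string; lineNumber: number; columnNumber?: number }[];
}

let nextId = 1;
function request(method: string, params?: object): CdpRequest {
  return { id: nextId++, method, params };
}

// Enable the debugger, then set a URL-based breakpoint that survives reloads,
// as described for Debugger.setBreakpointByUrl.
const messages: CdpRequest[] = [
  request("Debugger.enable"),
  request("Debugger.setBreakpointByUrl", {
    lineNumber: 42,
    url: "https://example.com/app.js",   // hypothetical target script
    condition: "items.length > 0",       // optional breakpoint condition
  }),
];
```
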
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index 36a0cca26c..dc753fee40 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -162,15 +162,15 @@ ConversionResult convertUTF16ToUTF8(const UChar** sourceStart,
* @return TRUE or FALSE
* @stable ICU 2.8
*/
-#define U_IS_BMP(c) ((uint32_t)(c) <= 0xffff)
+#define U_IS_BMP(c) ((uint32_t)(c) <= 0xFFFF)
/**
- * Is this code point a supplementary code point (U+10000..U+10ffff)?
+ * Is this code point a supplementary code point (U+010000..U+10FFFF)?
* @param c 32-bit code point
* @return TRUE or FALSE
* @stable ICU 2.8
*/
-#define U_IS_SUPPLEMENTARY(c) ((uint32_t)((c)-0x10000) <= 0xfffff)
+#define U_IS_SUPPLEMENTARY(c) ((uint32_t)((c)-0x010000) <= 0xFFFFF)
/**
* Is this code point a surrogate (U+d800..U+dfff)?
@@ -178,25 +178,25 @@ ConversionResult convertUTF16ToUTF8(const UChar** sourceStart,
* @return TRUE or FALSE
* @stable ICU 2.4
*/
-#define U_IS_SURROGATE(c) (((c)&0xfffff800) == 0xd800)
+#define U_IS_SURROGATE(c) (((c)&0xFFFFF800) == 0xD800)
/**
- * Get the lead surrogate (0xd800..0xdbff) for a
- * supplementary code point (0x10000..0x10ffff).
- * @param supplementary 32-bit code point (U+10000..U+10ffff)
- * @return lead surrogate (U+d800..U+dbff) for supplementary
+ * Get the lead surrogate (0xD800..0xDBFF) for a
+ * supplementary code point (0x010000..0x10FFFF).
+ * @param supplementary 32-bit code point (U+010000..U+10FFFF)
+ * @return lead surrogate (U+D800..U+DBFF) for supplementary
* @stable ICU 2.4
*/
-#define U16_LEAD(supplementary) (UChar)(((supplementary) >> 10) + 0xd7c0)
+#define U16_LEAD(supplementary) (UChar)(((supplementary) >> 10) + 0xD7C0)
/**
- * Get the trail surrogate (0xdc00..0xdfff) for a
- * supplementary code point (0x10000..0x10ffff).
- * @param supplementary 32-bit code point (U+10000..U+10ffff)
- * @return trail surrogate (U+dc00..U+dfff) for supplementary
+ * Get the trail surrogate (0xDC00..0xDFFF) for a
+ * supplementary code point (0x010000..0x10FFFF).
+ * @param supplementary 32-bit code point (U+010000..U+10FFFF)
+ * @return trail surrogate (U+DC00..U+DFFF) for supplementary
* @stable ICU 2.4
*/
-#define U16_TRAIL(supplementary) (UChar)(((supplementary)&0x3ff) | 0xdc00)
+#define U16_TRAIL(supplementary) (UChar)(((supplementary)&0x3FF) | 0xDC00)
// This must be called with the length pre-determined by the first byte.
// If presented with a length > 4, this returns false. The Unicode
@@ -329,7 +329,7 @@ ConversionResult convertUTF8ToUTF16(const char** sourceStart,
}
*target++ = U16_LEAD(character);
*target++ = U16_TRAIL(character);
- orAllData = 0xffff;
+ orAllData = 0xFFFF;
} else {
if (strict) {
source -= utf8SequenceLength; // return to the start
@@ -344,7 +344,7 @@ ConversionResult convertUTF8ToUTF16(const char** sourceStart,
*sourceStart = source;
*targetStart = target;
- if (sourceAllASCII) *sourceAllASCII = !(orAllData & ~0x7f);
+ if (sourceAllASCII) *sourceAllASCII = !(orAllData & ~0x7F);
return result;
}
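
The string-16.cc hunks only normalize hex-digit casing in the ICU-derived macros; the surrogate arithmetic itself is unchanged. For reference, the same lead/trail computation can be checked in a few lines of TypeScript against JavaScript's own UTF-16 encoding (the code point U+1F600 is just an example):

```ts
// Same arithmetic as the U16_LEAD / U16_TRAIL macros above, written out in TypeScript.
const lead = (cp: number) => (cp >> 10) + 0xD7C0;    // == 0xD800 + ((cp - 0x10000) >> 10)
const trail = (cp: number) => (cp & 0x3FF) | 0xDC00; // == 0xDC00 + ((cp - 0x10000) & 0x3FF)

const cp = 0x1F600;                  // a supplementary code point (😀)
const s = String.fromCodePoint(cp);  // JS strings are UTF-16 code units
console.log(lead(cp).toString(16), s.charCodeAt(0).toString(16));   // d83d d83d
console.log(trail(cp).toString(16), s.charCodeAt(1).toString(16));  // de00 de00
```
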
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index 1129eac676..e96e89c0eb 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -528,6 +528,11 @@ double V8ConsoleMessageStorage::timeEnd(int contextId, const String16& id) {
return elapsed;
}
+bool V8ConsoleMessageStorage::hasTimer(int contextId, const String16& id) {
+ const std::map<String16, double>& time = m_data[contextId].m_time;
+ return time.find(id) != time.end();
+}
+
void V8ConsoleMessageStorage::contextDestroyed(int contextId) {
m_estimatedSize = 0;
for (size_t i = 0; i < m_messages.size(); ++i) {
diff --git a/deps/v8/src/inspector/v8-console-message.h b/deps/v8/src/inspector/v8-console-message.h
index 57f692f6db..f82f8e5a13 100644
--- a/deps/v8/src/inspector/v8-console-message.h
+++ b/deps/v8/src/inspector/v8-console-message.h
@@ -120,6 +120,7 @@ class V8ConsoleMessageStorage {
int count(int contextId, const String16& id);
void time(int contextId, const String16& id);
double timeEnd(int contextId, const String16& id);
+ bool hasTimer(int contextId, const String16& id);
private:
V8InspectorImpl* m_inspector;
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index 7a0caf08a1..fa04209dec 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -284,7 +284,7 @@ void V8Console::Clear(const v8::debug::ConsoleCallArguments& info,
void V8Console::Count(const v8::debug::ConsoleCallArguments& info,
const v8::debug::ConsoleContext& consoleContext) {
ConsoleHelper helper(info, consoleContext, m_inspector);
- String16 title = helper.firstArgToString(String16());
+ String16 title = helper.firstArgToString(String16("default"), false);
String16 identifier;
if (title.isEmpty()) {
std::unique_ptr<V8StackTraceImpl> stackTrace =
@@ -354,10 +354,16 @@ static void timeFunction(const v8::debug::ConsoleCallArguments& info,
ConsoleHelper helper(info, consoleContext, inspector);
String16 protocolTitle = helper.firstArgToString("default", false);
if (timelinePrefix) protocolTitle = "Timeline '" + protocolTitle + "'";
+ const String16& timerId =
+ protocolTitle + "@" + consoleContextToString(consoleContext);
+ if (helper.consoleMessageStorage()->hasTimer(helper.contextId(), timerId)) {
+ helper.reportCallWithArgument(
+ ConsoleAPIType::kWarning,
+ "Timer '" + protocolTitle + "' already exists");
+ return;
+ }
inspector->client()->consoleTime(toStringView(protocolTitle));
- helper.consoleMessageStorage()->time(
- helper.contextId(),
- protocolTitle + "@" + consoleContextToString(consoleContext));
+ helper.consoleMessageStorage()->time(helper.contextId(), timerId);
}
static void timeEndFunction(const v8::debug::ConsoleCallArguments& info,
@@ -366,6 +372,14 @@ static void timeEndFunction(const v8::debug::ConsoleCallArguments& info,
ConsoleHelper helper(info, consoleContext, inspector);
String16 protocolTitle = helper.firstArgToString("default", false);
if (timelinePrefix) protocolTitle = "Timeline '" + protocolTitle + "'";
+ const String16& timerId =
+ protocolTitle + "@" + consoleContextToString(consoleContext);
+ if (!helper.consoleMessageStorage()->hasTimer(helper.contextId(), timerId)) {
+ helper.reportCallWithArgument(
+ ConsoleAPIType::kWarning,
+ "Timer '" + protocolTitle + "' does not exist");
+ return;
+ }
inspector->client()->consoleTimeEnd(toStringView(protocolTitle));
double elapsed = helper.consoleMessageStorage()->timeEnd(
helper.contextId(),
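
The v8-console.cc change makes `console.time()` and `console.timeEnd()` validate the timer label against the new `hasTimer()` lookup: starting a timer that already exists, or ending one that does not, now reports a warning instead of silently resetting the timer or misreporting elapsed time. The observable behavior, sketched with an arbitrary label:

```ts
console.time("load");      // starts the "load" timer
console.time("load");      // now warns: Timer 'load' already exists (instead of restarting it)

console.timeEnd("load");   // prints the elapsed time and removes the timer
console.timeEnd("load");   // now warns: Timer 'load' does not exist
```
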
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 8e5142d36e..7bfde09b71 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -553,7 +553,7 @@ Response V8DebuggerAgentImpl::setBreakpointByUrl(
}
std::unique_ptr<protocol::Debugger::Location> location = setBreakpointImpl(
breakpointId, script.first, condition, lineNumber, columnNumber);
- if (type != BreakpointType::kByUrlRegex) {
+ if (location && type != BreakpointType::kByUrlRegex) {
hint = breakpointHint(*script.second, lineNumber, columnNumber);
}
if (location) (*locations)->addItem(std::move(location));
@@ -1330,6 +1330,24 @@ V8DebuggerAgentImpl::currentExternalStackTrace() {
.build();
}
+std::unique_ptr<protocol::Runtime::StackTraceId>
+V8DebuggerAgentImpl::currentScheduledAsyncCall() {
+ v8_inspector::V8StackTraceId scheduledAsyncCall =
+ m_debugger->scheduledAsyncCall();
+ if (scheduledAsyncCall.IsInvalid()) return nullptr;
+ std::unique_ptr<protocol::Runtime::StackTraceId> asyncCallStackTrace =
+ protocol::Runtime::StackTraceId::create()
+ .setId(stackTraceIdToString(scheduledAsyncCall.id))
+ .build();
+ // TODO(kozyatinskiy): extract this check to IsLocal function.
+ if (scheduledAsyncCall.debugger_id.first ||
+ scheduledAsyncCall.debugger_id.second) {
+ asyncCallStackTrace->setDebuggerId(
+ debuggerIdToString(scheduledAsyncCall.debugger_id));
+ }
+ return asyncCallStackTrace;
+}
+
bool V8DebuggerAgentImpl::isPaused() const {
return m_debugger->isPausedInContextGroup(m_session->contextGroupId());
}
@@ -1532,22 +1550,10 @@ void V8DebuggerAgentImpl::didPause(
Response response = currentCallFrames(&protocolCallFrames);
if (!response.isSuccess()) protocolCallFrames = Array<CallFrame>::create();
- Maybe<protocol::Runtime::StackTraceId> asyncCallStackTrace;
- void* rawScheduledAsyncTask = m_debugger->scheduledAsyncTask();
- if (rawScheduledAsyncTask) {
- asyncCallStackTrace =
- protocol::Runtime::StackTraceId::create()
- .setId(stackTraceIdToString(
- reinterpret_cast<uintptr_t>(rawScheduledAsyncTask)))
- .setDebuggerId(debuggerIdToString(
- m_debugger->debuggerIdFor(m_session->contextGroupId())))
- .build();
- }
-
m_frontend.paused(std::move(protocolCallFrames), breakReason,
std::move(breakAuxData), std::move(hitBreakpointIds),
currentAsyncStackTrace(), currentExternalStackTrace(),
- std::move(asyncCallStackTrace));
+ currentScheduledAsyncCall());
}
void V8DebuggerAgentImpl::didContinue() {
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index e697b700e9..168c5a7724 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -156,6 +156,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
std::unique_ptr<protocol::Array<protocol::Debugger::CallFrame>>*);
std::unique_ptr<protocol::Runtime::StackTrace> currentAsyncStackTrace();
std::unique_ptr<protocol::Runtime::StackTraceId> currentExternalStackTrace();
+ std::unique_ptr<protocol::Runtime::StackTraceId> currentScheduledAsyncCall();
void setPauseOnExceptionsImpl(int);
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 8f843b54b2..c86f320252 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -331,11 +331,7 @@ void V8Debugger::pauseOnAsyncCall(int targetContextGroupId, uintptr_t task,
m_targetContextGroupId = targetContextGroupId;
m_taskWithScheduledBreak = reinterpret_cast<void*>(task);
- String16 currentDebuggerId =
- debuggerIdToString(debuggerIdFor(targetContextGroupId));
- if (currentDebuggerId != debuggerId) {
- m_taskWithScheduledBreakDebuggerId = debuggerId;
- }
+ m_taskWithScheduledBreakDebuggerId = debuggerId;
}
Response V8Debugger::continueToLocation(
@@ -542,19 +538,18 @@ void V8Debugger::PromiseEventOccurred(v8::debug::PromiseDebugActionType type,
switch (type) {
case v8::debug::kDebugAsyncFunctionPromiseCreated:
asyncTaskScheduledForStack("async function", task, true);
- if (!isBlackboxed) asyncTaskCandidateForStepping(task);
break;
case v8::debug::kDebugPromiseThen:
asyncTaskScheduledForStack("Promise.then", task, false);
- if (!isBlackboxed) asyncTaskCandidateForStepping(task);
+ if (!isBlackboxed) asyncTaskCandidateForStepping(task, true);
break;
case v8::debug::kDebugPromiseCatch:
asyncTaskScheduledForStack("Promise.catch", task, false);
- if (!isBlackboxed) asyncTaskCandidateForStepping(task);
+ if (!isBlackboxed) asyncTaskCandidateForStepping(task, true);
break;
case v8::debug::kDebugPromiseFinally:
asyncTaskScheduledForStack("Promise.finally", task, false);
- if (!isBlackboxed) asyncTaskCandidateForStepping(task);
+ if (!isBlackboxed) asyncTaskCandidateForStepping(task, true);
break;
case v8::debug::kDebugWillHandle:
asyncTaskStartedForStack(task);
@@ -767,7 +762,7 @@ V8StackTraceId V8Debugger::storeCurrentStackTrace(
++m_asyncStacksCount;
collectOldAsyncStacksIfNeeded();
- asyncTaskCandidateForStepping(reinterpret_cast<void*>(id));
+ asyncTaskCandidateForStepping(reinterpret_cast<void*>(id), false);
return V8StackTraceId(id, debuggerIdFor(contextGroupId));
}
@@ -816,7 +811,7 @@ void V8Debugger::externalAsyncTaskFinished(const V8StackTraceId& parent) {
void V8Debugger::asyncTaskScheduled(const StringView& taskName, void* task,
bool recurring) {
asyncTaskScheduledForStack(toString16(taskName), task, recurring);
- asyncTaskCandidateForStepping(task);
+ asyncTaskCandidateForStepping(task, true);
}
void V8Debugger::asyncTaskCanceled(void* task) {
@@ -890,16 +885,23 @@ void V8Debugger::asyncTaskFinishedForStack(void* task) {
}
}
-void V8Debugger::asyncTaskCandidateForStepping(void* task) {
- if (m_pauseOnAsyncCall) {
- m_scheduledAsyncTask = task;
+void V8Debugger::asyncTaskCandidateForStepping(void* task, bool isLocal) {
+ int contextGroupId = currentContextGroupId();
+ if (m_pauseOnAsyncCall && contextGroupId) {
+ if (isLocal) {
+ m_scheduledAsyncCall = v8_inspector::V8StackTraceId(
+ reinterpret_cast<uintptr_t>(task), std::make_pair(0, 0));
+ } else {
+ m_scheduledAsyncCall = v8_inspector::V8StackTraceId(
+ reinterpret_cast<uintptr_t>(task), debuggerIdFor(contextGroupId));
+ }
breakProgram(m_targetContextGroupId);
- m_scheduledAsyncTask = nullptr;
+ m_scheduledAsyncCall = v8_inspector::V8StackTraceId();
return;
}
if (!m_stepIntoAsyncCallback) return;
DCHECK(m_targetContextGroupId);
- if (currentContextGroupId() != m_targetContextGroupId) return;
+ if (contextGroupId != m_targetContextGroupId) return;
m_taskWithScheduledBreak = task;
v8::debug::ClearStepping(m_isolate);
m_stepIntoAsyncCallback->sendSuccess();
@@ -1031,6 +1033,7 @@ std::pair<int64_t, int64_t> V8Debugger::debuggerIdFor(int contextGroupId) {
std::pair<int64_t, int64_t> debuggerId(
v8::debug::GetNextRandomInt64(m_isolate),
v8::debug::GetNextRandomInt64(m_isolate));
+ if (!debuggerId.first && !debuggerId.second) ++debuggerId.first;
m_contextGroupIdToDebuggerId.insert(
it, std::make_pair(contextGroupId, debuggerId));
m_serializedDebuggerIdToDebuggerId.insert(
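The one-line addition in debuggerIdFor guards the same invariant from the other side: a freshly generated debugger id is nudged away from (0, 0) so it can never be mistaken for the reserved "local" marker used by asyncTaskCandidateForStepping. A standalone sketch of that invariant (rand64 is a placeholder standing in for v8::debug::GetNextRandomInt64):

#include <cstdint>
#include <utility>

std::pair<int64_t, int64_t> MakeDebuggerId(int64_t (*rand64)()) {
  std::pair<int64_t, int64_t> id(rand64(), rand64());
  // (0, 0) is reserved to mean "scheduled locally"; the vanishingly unlikely
  // all-zero id is bumped so it stays distinguishable from that marker.
  if (!id.first && !id.second) ++id.first;
  return id;
}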
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index 455bb5952d..4828fcad52 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -117,7 +117,9 @@ class V8Debugger : public v8::debug::DebugDelegate {
void setMaxAsyncTaskStacksForTest(int limit);
void dumpAsyncTaskStacksStateForTest();
- void* scheduledAsyncTask() { return m_scheduledAsyncTask; }
+ v8_inspector::V8StackTraceId scheduledAsyncCall() {
+ return m_scheduledAsyncCall;
+ }
std::pair<int64_t, int64_t> debuggerIdFor(int contextGroupId);
std::pair<int64_t, int64_t> debuggerIdFor(
@@ -155,7 +157,7 @@ class V8Debugger : public v8::debug::DebugDelegate {
void asyncTaskStartedForStack(void* task);
void asyncTaskFinishedForStack(void* task);
- void asyncTaskCandidateForStepping(void* task);
+ void asyncTaskCandidateForStepping(void* task, bool isLocal);
void asyncTaskStartedForStepping(void* task);
void asyncTaskFinishedForStepping(void* task);
void asyncTaskCanceledForStepping(void* task);
@@ -219,7 +221,7 @@ class V8Debugger : public v8::debug::DebugDelegate {
v8::debug::ExceptionBreakState m_pauseOnExceptionsState;
bool m_pauseOnAsyncCall = false;
- void* m_scheduledAsyncTask = nullptr;
+ v8_inspector::V8StackTraceId m_scheduledAsyncCall;
using StackTraceIdToStackTrace =
protocol::HashMap<uintptr_t, std::weak_ptr<AsyncStackTrace>>;
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
index 8af3edf7e1..b876a956b2 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -63,7 +63,7 @@ class GlobalObjectNameResolver final
if (m_offset + length + 1 >= m_strings.size()) return "";
for (size_t i = 0; i < length; ++i) {
UChar ch = name[i];
- m_strings[m_offset + i] = ch > 0xff ? '?' : static_cast<char>(ch);
+ m_strings[m_offset + i] = ch > 0xFF ? '?' : static_cast<char>(ch);
}
m_strings[m_offset + length] = '\0';
char* result = &*m_strings.begin() + m_offset;
diff --git a/deps/v8/src/inspector/v8-injected-script-host.cc b/deps/v8/src/inspector/v8-injected-script-host.cc
index ef978ceda3..1455cf6dbc 100644
--- a/deps/v8/src/inspector/v8-injected-script-host.cc
+++ b/deps/v8/src/inspector/v8-injected-script-host.cc
@@ -44,6 +44,15 @@ V8InspectorImpl* unwrapInspector(
return inspector;
}
+template <typename TypedArray>
+void addTypedArrayProperty(std::vector<v8::Local<v8::Value>>* props,
+ v8::Isolate* isolate,
+ v8::Local<v8::ArrayBuffer> arraybuffer,
+ String16 name, size_t length) {
+ props->push_back(toV8String(isolate, name));
+ props->push_back(TypedArray::New(arraybuffer, 0, length));
+}
+
} // namespace
v8::Local<v8::Object> V8InjectedScriptHost::create(
@@ -84,6 +93,9 @@ v8::Local<v8::Object> V8InjectedScriptHost::create(
setFunctionProperty(context, injectedScriptHost, "nativeAccessorDescriptor",
V8InjectedScriptHost::nativeAccessorDescriptorCallback,
debuggerExternal);
+ setFunctionProperty(context, injectedScriptHost, "typedArrayProperties",
+ V8InjectedScriptHost::typedArrayPropertiesCallback,
+ debuggerExternal);
createDataProperty(context, injectedScriptHost,
toV8StringInternalized(isolate, "keys"),
v8::debug::GetBuiltin(isolate, v8::debug::kObjectKeys));
@@ -335,7 +347,7 @@ void V8InjectedScriptHost::proxyTargetValueCallback(
UNREACHABLE();
return;
}
- v8::Local<v8::Object> target = info[0].As<v8::Proxy>();
+ v8::Local<v8::Value> target = info[0].As<v8::Proxy>();
while (target->IsProxy())
target = v8::Local<v8::Proxy>::Cast(target)->GetTarget();
info.GetReturnValue().Set(target);
@@ -374,4 +386,40 @@ void V8InjectedScriptHost::nativeAccessorDescriptorCallback(
info.GetReturnValue().Set(result);
}
+void V8InjectedScriptHost::typedArrayPropertiesCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ v8::Isolate* isolate = info.GetIsolate();
+ if (info.Length() != 1 || !info[0]->IsArrayBuffer()) return;
+
+ v8::TryCatch tryCatch(isolate);
+ v8::Isolate::DisallowJavascriptExecutionScope throwJs(
+ isolate, v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
+ v8::Local<v8::ArrayBuffer> arrayBuffer = info[0].As<v8::ArrayBuffer>();
+ size_t length = arrayBuffer->ByteLength();
+ if (length == 0) return;
+ std::vector<v8::Local<v8::Value>> arrays_vector;
+ addTypedArrayProperty<v8::Int8Array>(&arrays_vector, isolate, arrayBuffer,
+ "[[Int8Array]]", length);
+ addTypedArrayProperty<v8::Uint8Array>(&arrays_vector, isolate, arrayBuffer,
+ "[[Uint8Array]]", length);
+
+ if (length % 2 == 0) {
+ addTypedArrayProperty<v8::Int16Array>(&arrays_vector, isolate, arrayBuffer,
+ "[[Int16Array]]", length / 2);
+ }
+ if (length % 4 == 0) {
+ addTypedArrayProperty<v8::Int32Array>(&arrays_vector, isolate, arrayBuffer,
+ "[[Int32Array]]", length / 4);
+ }
+
+ if (tryCatch.HasCaught()) return;
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::Array> arrays =
+ v8::Array::New(isolate, static_cast<uint32_t>(arrays_vector.size()));
+ for (uint32_t i = 0; i < static_cast<uint32_t>(arrays_vector.size()); i++)
+ createDataProperty(context, arrays, i, arrays_vector[i]);
+ if (tryCatch.HasCaught()) return;
+ info.GetReturnValue().Set(arrays);
+}
+
} // namespace v8_inspector
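typedArrayPropertiesCallback builds a flat array of alternating name/view pairs, each view spanning the whole buffer in its own element width. For an 8-byte ArrayBuffer, the result assembled above would look roughly like this (an illustrative layout sketch, not code from the patch):

// "[[Int8Array]]"  -> Int8Array(buffer, 0, 8)   // always added
// "[[Uint8Array]]" -> Uint8Array(buffer, 0, 8)  // always added
// "[[Int16Array]]" -> Int16Array(buffer, 0, 4)  // only if byte length % 2 == 0
// "[[Int32Array]]" -> Int32Array(buffer, 0, 2)  // only if byte length % 4 == 0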
diff --git a/deps/v8/src/inspector/v8-injected-script-host.h b/deps/v8/src/inspector/v8-injected-script-host.h
index 491a157ea8..18f9139d63 100644
--- a/deps/v8/src/inspector/v8-injected-script-host.h
+++ b/deps/v8/src/inspector/v8-injected-script-host.h
@@ -44,6 +44,8 @@ class V8InjectedScriptHost {
const v8::FunctionCallbackInfo<v8::Value>&);
static void nativeAccessorDescriptorCallback(
const v8::FunctionCallbackInfo<v8::Value>&);
+ static void typedArrayPropertiesCallback(
+ const v8::FunctionCallbackInfo<v8::Value>&);
};
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.cc b/deps/v8/src/inspector/v8-inspector-session-impl.cc
index 6fba10ff11..d580c41e30 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.cc
@@ -262,8 +262,9 @@ Response V8InspectorSessionImpl::unwrapObject(const String16& objectId,
std::unique_ptr<protocol::Runtime::API::RemoteObject>
V8InspectorSessionImpl::wrapObject(v8::Local<v8::Context> context,
v8::Local<v8::Value> value,
- const StringView& groupName) {
- return wrapObject(context, value, toString16(groupName), false);
+ const StringView& groupName,
+ bool generatePreview) {
+ return wrapObject(context, value, toString16(groupName), generatePreview);
}
std::unique_ptr<protocol::Runtime::RemoteObject>
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.h b/deps/v8/src/inspector/v8-inspector-session-impl.h
index adac6f1a85..4fb924f749 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.h
@@ -85,8 +85,8 @@ class V8InspectorSessionImpl : public V8InspectorSession,
v8::Local<v8::Value>*, v8::Local<v8::Context>*,
std::unique_ptr<StringBuffer>* objectGroup) override;
std::unique_ptr<protocol::Runtime::API::RemoteObject> wrapObject(
- v8::Local<v8::Context>, v8::Local<v8::Value>,
- const StringView& groupName) override;
+ v8::Local<v8::Context>, v8::Local<v8::Value>, const StringView& groupName,
+ bool generatePreview) override;
V8InspectorSession::Inspectable* inspectedObject(unsigned num);
static const unsigned kInspectedObjectBufferSize = 5;
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index a8aaa1158b..8c208aaf8a 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -42,6 +42,7 @@ void calculateAsyncChain(V8Debugger* debugger, int contextGroupId,
// not happen if we have proper instrumentation, but let's double-check to be
// safe.
if (contextGroupId && *asyncParent &&
+ (*asyncParent)->externalParent().IsInvalid() &&
(*asyncParent)->contextGroupId() != contextGroupId) {
asyncParent->reset();
*externalParent = V8StackTraceId();
@@ -338,14 +339,15 @@ std::shared_ptr<AsyncStackTrace> AsyncStackTrace::capture(
 // but doesn't contain synchronous frames, we can merge them together,
 // e.g. Promise ThenableJob.
if (asyncParent && frames.empty() &&
- asyncParent->m_description == description) {
+ (asyncParent->m_description == description || description.isEmpty())) {
return asyncParent;
}
- DCHECK(contextGroupId || asyncParent);
+ DCHECK(contextGroupId || asyncParent || !externalParent.IsInvalid());
if (!contextGroupId && asyncParent) {
contextGroupId = asyncParent->m_contextGroupId;
}
+
return std::shared_ptr<AsyncStackTrace>(
new AsyncStackTrace(contextGroupId, description, std::move(frames),
asyncParent, externalParent));
@@ -362,7 +364,7 @@ AsyncStackTrace::AsyncStackTrace(
m_frames(std::move(frames)),
m_asyncParent(asyncParent),
m_externalParent(externalParent) {
- DCHECK(m_contextGroupId);
+ DCHECK(m_contextGroupId || (!externalParent.IsInvalid() && m_frames.empty()));
}
std::unique_ptr<protocol::Runtime::StackTrace>
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
index b8314c8fc4..08d98110ae 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.h
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -120,6 +120,7 @@ class AsyncStackTrace {
const String16& description() const;
std::weak_ptr<AsyncStackTrace> parent() const;
bool isEmpty() const;
+ const V8StackTraceId& externalParent() const { return m_externalParent; }
const std::vector<std::shared_ptr<StackFrame>>& frames() const {
return m_frames;
diff --git a/deps/v8/src/inspector/v8-value-utils.cc b/deps/v8/src/inspector/v8-value-utils.cc
index f32369df36..3835f34f6d 100644
--- a/deps/v8/src/inspector/v8-value-utils.cc
+++ b/deps/v8/src/inspector/v8-value-utils.cc
@@ -85,6 +85,7 @@ protocol::Response toProtocolValue(v8::Local<v8::Context> context,
v8::Local<v8::Value> property;
if (!object->Get(context, name).ToLocal(&property))
return Response::InternalError();
+ if (property->IsUndefined()) continue;
std::unique_ptr<protocol::Value> propertyValue;
Response response =
toProtocolValue(context, property, maxDepth, &propertyValue);
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index 970a4ad3ad..3b466aceb9 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -170,6 +170,42 @@ void LoadGlobalWithVectorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void StoreGlobalDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kName, kValue, kSlot
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged(),
+ MachineType::TaggedSigned()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void StoreGlobalDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {NameRegister(), ValueRegister(), SlotRegister()};
+
+ int len = arraysize(registers) - kStackArgumentsCount;
+ data->InitializePlatformSpecific(len, registers);
+}
+
+void StoreGlobalWithVectorDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kName, kValue, kSlot, kVector
+ MachineType machine_types[] = {
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::TaggedSigned(), MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void StoreGlobalWithVectorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {NameRegister(), ValueRegister(), SlotRegister(),
+ VectorRegister()};
+ int len = arraysize(registers) - kStackArgumentsCount;
+ data->InitializePlatformSpecific(len, registers);
+}
+
void StoreDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kReceiver, kName, kValue, kSlot
@@ -233,21 +269,7 @@ void StoreNamedTransitionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(len, registers);
}
-void StringCharAtDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kReceiver, kPosition
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::IntPtr()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void StringCharAtDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void StringCharCodeAtDescriptor::InitializePlatformIndependent(
+void StringAtDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kReceiver, kPosition
// TODO(turbofan): Allow builtins to return untagged values.
@@ -257,7 +279,7 @@ void StringCharCodeAtDescriptor::InitializePlatformIndependent(
machine_types);
}
-void StringCharCodeAtDescriptor::InitializePlatformSpecific(
+void StringAtDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
}
@@ -320,24 +342,6 @@ void LoadWithVectorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void LoadICProtoArrayDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kReceiver, kName, kSlot, kVector, kHandler
- MachineType machine_types[] = {
- MachineType::AnyTagged(), MachineType::AnyTagged(),
- MachineType::TaggedSigned(), MachineType::AnyTagged(),
- MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void LoadICProtoArrayDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister(),
- VectorRegister(), HandlerRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void StoreWithVectorDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kReceiver, kName, kValue, kSlot, kVector
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index 49c047333a..12b25a510a 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -22,13 +22,14 @@ class PlatformInterfaceDescriptor;
V(Load) \
V(LoadWithVector) \
V(LoadField) \
- V(LoadICProtoArray) \
V(LoadGlobal) \
V(LoadGlobalWithVector) \
V(Store) \
V(StoreWithVector) \
V(StoreNamedTransition) \
V(StoreTransition) \
+ V(StoreGlobal) \
+ V(StoreGlobalWithVector) \
V(FastNewClosure) \
V(FastNewFunctionContext) \
V(FastNewObject) \
@@ -50,6 +51,7 @@ class PlatformInterfaceDescriptor;
V(ConstructWithArrayLike) \
V(ConstructTrampoline) \
V(TransitionElementsKind) \
+ V(AbortJS) \
V(AllocateHeapNumber) \
V(Builtin) \
V(ArrayConstructor) \
@@ -60,8 +62,7 @@ class PlatformInterfaceDescriptor;
V(Compare) \
V(BinaryOp) \
V(StringAdd) \
- V(StringCharAt) \
- V(StringCharCodeAt) \
+ V(StringAt) \
V(ForInPrepare) \
V(GetProperty) \
V(ArgumentAdaptor) \
@@ -78,6 +79,7 @@ class PlatformInterfaceDescriptor;
V(ResumeGenerator) \
V(FrameDropperTrampoline) \
V(WasmRuntimeCall) \
+ V(RunMicrotasks) \
BUILTIN_LIST_TFS(V)
class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
@@ -454,6 +456,44 @@ class StoreWithVectorDescriptor : public StoreDescriptor {
static const int kStackArgumentsCount = kPassLastArgsOnStack ? 3 : 0;
};
+class StoreGlobalDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kName, kValue, kSlot)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreGlobalDescriptor,
+ CallInterfaceDescriptor)
+
+ static const bool kPassLastArgsOnStack =
+ StoreDescriptor::kPassLastArgsOnStack;
+ // Pass value and slot through the stack.
+ static const int kStackArgumentsCount = kPassLastArgsOnStack ? 2 : 0;
+
+ static const Register NameRegister() {
+ return StoreDescriptor::NameRegister();
+ }
+
+ static const Register ValueRegister() {
+ return StoreDescriptor::ValueRegister();
+ }
+
+ static const Register SlotRegister() {
+ return StoreDescriptor::SlotRegister();
+ }
+};
+
+class StoreGlobalWithVectorDescriptor : public StoreGlobalDescriptor {
+ public:
+ DEFINE_PARAMETERS(kName, kValue, kSlot, kVector)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreGlobalWithVectorDescriptor,
+ StoreGlobalDescriptor)
+
+ static const Register VectorRegister() {
+ return StoreWithVectorDescriptor::VectorRegister();
+ }
+
+ // Pass value, slot and vector through the stack.
+ static const int kStackArgumentsCount = kPassLastArgsOnStack ? 3 : 0;
+};
+
class LoadWithVectorDescriptor : public LoadDescriptor {
public:
DEFINE_PARAMETERS(kReceiver, kName, kSlot, kVector)
@@ -463,15 +503,6 @@ class LoadWithVectorDescriptor : public LoadDescriptor {
static const Register VectorRegister();
};
-class LoadICProtoArrayDescriptor : public LoadWithVectorDescriptor {
- public:
- DEFINE_PARAMETERS(kReceiver, kName, kSlot, kVector, kHandler)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadICProtoArrayDescriptor,
- LoadWithVectorDescriptor)
-
- static const Register HandlerRegister();
-};
-
class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
public:
DEFINE_PARAMETERS(kName, kSlot, kVector)
@@ -650,6 +681,11 @@ class TransitionElementsKindDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(TransitionElementsKindDescriptor, CallInterfaceDescriptor)
};
+class AbortJSDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kObject)
+ DECLARE_DESCRIPTOR(AbortJSDescriptor, CallInterfaceDescriptor)
+};
class AllocateHeapNumberDescriptor : public CallInterfaceDescriptor {
public:
@@ -725,17 +761,12 @@ class StringAddDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(StringAddDescriptor, CallInterfaceDescriptor)
};
-class StringCharAtDescriptor final : public CallInterfaceDescriptor {
+// This descriptor is shared among String.p.charAt/charCodeAt/codePointAt
+// as they all have the same interface.
+class StringAtDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kReceiver, kPosition)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StringCharAtDescriptor,
- CallInterfaceDescriptor)
-};
-
-class StringCharCodeAtDescriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kReceiver, kPosition)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StringCharCodeAtDescriptor,
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StringAtDescriptor,
CallInterfaceDescriptor)
};
@@ -846,6 +877,13 @@ class WasmRuntimeCallDescriptor final : public CallInterfaceDescriptor {
0)
};
+class RunMicrotasksDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_EMPTY_PARAMETERS()
+ DECLARE_DEFAULT_DESCRIPTOR(RunMicrotasksDescriptor, CallInterfaceDescriptor,
+ 0)
+};
+
#define DEFINE_TFS_BUILTIN_DESCRIPTOR(Name, ...) \
class Name##Descriptor : public CallInterfaceDescriptor { \
public: \
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 5be818eb2d..dcbe8029f9 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -701,14 +701,9 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(const AstRawString* name,
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
- const AstRawString* name, int feedback_slot, LanguageMode language_mode) {
+ const AstRawString* name, int feedback_slot) {
size_t name_index = GetConstantPoolEntry(name);
- if (language_mode == LanguageMode::kSloppy) {
- OutputStaGlobalSloppy(name_index, feedback_slot);
- } else {
- DCHECK_EQ(language_mode, LanguageMode::kStrict);
- OutputStaGlobalStrict(name_index, feedback_slot);
- }
+ OutputStaGlobal(name_index, feedback_slot);
return *this;
}
@@ -1185,8 +1180,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::ReThrow() {
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::Abort(BailoutReason reason) {
- OutputAbort(reason);
+BytecodeArrayBuilder& BytecodeArrayBuilder::Abort(AbortReason reason) {
+ DCHECK_LT(reason, AbortReason::kLastErrorMessage);
+ DCHECK_GE(reason, AbortReason::kNoReason);
+ OutputAbort(static_cast<int>(reason));
return *this;
}
@@ -1280,10 +1277,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::RestoreGeneratorState(
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::RestoreGeneratorRegisters(
- Register generator, RegisterList registers) {
- OutputRestoreGeneratorRegisters(generator, registers,
- registers.register_count());
+BytecodeArrayBuilder& BytecodeArrayBuilder::ResumeGenerator(
+ Register generator, Register generator_state, RegisterList registers) {
+ OutputResumeGenerator(generator, generator_state, registers,
+ registers.register_count());
return *this;
}
@@ -1389,7 +1386,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
Runtime::FunctionId function_id, Register arg) {
- return CallRuntime(function_id, RegisterList(arg.index(), 1));
+ return CallRuntime(function_id, RegisterList(arg));
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
@@ -1411,8 +1408,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
Runtime::FunctionId function_id, Register arg, RegisterList return_pair) {
- return CallRuntimeForPair(function_id, RegisterList(arg.index(), 1),
- return_pair);
+ return CallRuntimeForPair(function_id, RegisterList(arg), return_pair);
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(int context_index,
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index 4063791a18..021222abe5 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -85,8 +85,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
// Global loads to the accumulator and stores from the accumulator.
BytecodeArrayBuilder& LoadGlobal(const AstRawString* name, int feedback_slot,
TypeofMode typeof_mode);
- BytecodeArrayBuilder& StoreGlobal(const AstRawString* name, int feedback_slot,
- LanguageMode language_mode);
+ BytecodeArrayBuilder& StoreGlobal(const AstRawString* name,
+ int feedback_slot);
// Load the object at |slot_index| at |depth| in the context chain starting
// with |context| into the accumulator.
@@ -404,7 +404,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& Throw();
BytecodeArrayBuilder& ReThrow();
- BytecodeArrayBuilder& Abort(BailoutReason reason);
+ BytecodeArrayBuilder& Abort(AbortReason reason);
BytecodeArrayBuilder& Return();
BytecodeArrayBuilder& ThrowReferenceErrorIfHole(const AstRawString* name);
BytecodeArrayBuilder& ThrowSuperNotCalledIfHole();
@@ -431,8 +431,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
RegisterList registers,
int suspend_id);
BytecodeArrayBuilder& RestoreGeneratorState(Register generator);
- BytecodeArrayBuilder& RestoreGeneratorRegisters(Register generator,
- RegisterList registers);
+ BytecodeArrayBuilder& ResumeGenerator(Register generator,
+ Register generator_state,
+ RegisterList registers);
// Exception handling.
BytecodeArrayBuilder& MarkHandler(int handler_id,
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 45f0d1eca9..ee94e7a2e2 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -835,6 +835,24 @@ class BytecodeGenerator::FeedbackSlotCache : public ZoneObject {
ZoneMap<Key, FeedbackSlot> map_;
};
+class BytecodeGenerator::IteratorRecord final {
+ public:
+ IteratorRecord(Register object_register, Register next_register,
+ IteratorType type = IteratorType::kNormal)
+ : type_(type), object_(object_register), next_(next_register) {
+ DCHECK(object_.is_valid() && next_.is_valid());
+ }
+
+ inline IteratorType type() const { return type_; }
+ inline Register object() const { return object_; }
+ inline Register next() const { return next_; }
+
+ private:
+ IteratorType type_;
+ Register object_;
+ Register next_;
+};
+
BytecodeGenerator::BytecodeGenerator(
CompilationInfo* info, const AstStringConstants* ast_string_constants)
: zone_(info->zone()),
@@ -1130,7 +1148,7 @@ void BytecodeGenerator::VisitIterationHeader(int first_suspend_id,
.JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &not_resuming);
// Otherwise this is an error.
- builder()->Abort(BailoutReason::kInvalidJumpTableIndex);
+ builder()->Abort(AbortReason::kInvalidJumpTableIndex);
builder()->Bind(&not_resuming);
}
@@ -1162,7 +1180,7 @@ void BytecodeGenerator::BuildGeneratorPrologue() {
}
// We fall through when the generator state is not in the jump table.
// TODO(leszeks): Only generate this for debug builds.
- builder()->Abort(BailoutReason::kInvalidJumpTableIndex);
+ builder()->Abort(AbortReason::kInvalidJumpTableIndex);
// This is a regular call.
builder()
@@ -1674,6 +1692,7 @@ void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
builder()->SetExpressionAsStatementPosition(stmt->assign_iterator());
VisitForEffect(stmt->assign_iterator());
+ VisitForEffect(stmt->assign_next());
VisitIterationHeader(stmt, &loop_builder);
builder()->SetExpressionAsStatementPosition(stmt->next_result());
@@ -1712,9 +1731,11 @@ void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
}
try_control_builder.EndTry();
- // Create a catch scope that binds the exception.
- BuildNewLocalCatchContext(stmt->scope());
- builder()->StoreAccumulatorInRegister(context);
+ if (stmt->scope()) {
+ // Create a catch scope that binds the exception.
+ BuildNewLocalCatchContext(stmt->scope());
+ builder()->StoreAccumulatorInRegister(context);
+ }
// If requested, clear message object as we enter the catch block.
if (stmt->ShouldClearPendingException(outer_catch_prediction)) {
@@ -1725,7 +1746,11 @@ void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
builder()->LoadAccumulatorWithRegister(context);
// Evaluate the catch-block.
- VisitInScope(stmt->catch_block(), stmt->scope());
+ if (stmt->scope()) {
+ VisitInScope(stmt->catch_block(), stmt->scope());
+ } else {
+ VisitBlock(stmt->catch_block());
+ }
try_control_builder.EndCatch();
}
@@ -2069,6 +2094,8 @@ void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ expr->InitDepthAndFlags();
+
// Fast path for the empty object literal which doesn't need an
// AllocationSite.
if (expr->IsEmptyObjectLiteral()) {
@@ -2275,6 +2302,8 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ expr->InitDepthAndFlags();
+
// Deep-copy the literal boilerplate.
int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
if (expr->is_empty()) {
@@ -2290,31 +2319,25 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
builder()->CreateArrayLiteral(entry, literal_index, flags);
array_literals_.push_back(std::make_pair(expr, entry));
- Register index, literal;
+ Register index = register_allocator()->NewRegister();
+ Register literal = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(literal);
// We'll reuse the same literal slot for all of the non-constant
// subexpressions that use a keyed store IC.
// Evaluate all the non-constant subexpressions and store them into the
// newly cloned array.
- bool literal_in_accumulator = true;
FeedbackSlot slot;
- for (int array_index = 0; array_index < expr->values()->length();
- array_index++) {
- Expression* subexpr = expr->values()->at(array_index);
- if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+ int array_index = 0;
+ ZoneList<Expression*>::iterator iter = expr->BeginValue();
+ for (; iter != expr->FirstSpreadOrEndValue(); ++iter, array_index++) {
+ Expression* subexpr = *iter;
DCHECK(!subexpr->IsSpread());
-
- if (literal_in_accumulator) {
- index = register_allocator()->NewRegister();
- literal = register_allocator()->NewRegister();
- builder()->StoreAccumulatorInRegister(literal);
- literal_in_accumulator = false;
- }
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (slot.IsInvalid()) {
slot = feedback_spec()->AddKeyedStoreICSlot(language_mode());
}
-
builder()
->LoadLiteral(Smi::FromInt(array_index))
.StoreAccumulatorInRegister(index);
@@ -2323,10 +2346,68 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
language_mode());
}
- if (!literal_in_accumulator) {
- // Restore literal array into accumulator.
- builder()->LoadAccumulatorWithRegister(literal);
+ // Handle spread elements and elements following.
+ for (; iter != expr->EndValue(); ++iter) {
+ Expression* subexpr = *iter;
+ if (subexpr->IsSpread()) {
+ BuildArrayLiteralSpread(subexpr->AsSpread(), literal);
+ } else if (!subexpr->IsTheHoleLiteral()) {
+ // Perform %AppendElement(array, <subexpr>)
+ RegisterAllocationScope register_scope(this);
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()->MoveRegister(literal, args[0]);
+ VisitForRegisterValue(subexpr, args[1]);
+ builder()->CallRuntime(Runtime::kAppendElement, args);
+ } else {
+      // Perform ++<array>.length;
+      // TODO(caitp): Why can't we just %AppendElement(array, <The Hole>)?
+ auto length = ast_string_constants()->length_string();
+ builder()->LoadNamedProperty(
+ literal, length, feedback_index(feedback_spec()->AddLoadICSlot()));
+ builder()->UnaryOperation(
+ Token::INC, feedback_index(feedback_spec()->AddBinaryOpICSlot()));
+ builder()->StoreNamedProperty(
+ literal, length,
+ feedback_index(
+ feedback_spec()->AddStoreICSlot(LanguageMode::kStrict)),
+ LanguageMode::kStrict);
+ }
}
+
+ // Restore literal array into accumulator.
+ builder()->LoadAccumulatorWithRegister(literal);
+}
+
+void BytecodeGenerator::BuildArrayLiteralSpread(Spread* spread,
+ Register array) {
+ RegisterAllocationScope register_scope(this);
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()->MoveRegister(array, args[0]);
+ Register next_result = args[1];
+
+ builder()->SetExpressionAsStatementPosition(spread->expression());
+ IteratorRecord iterator =
+ BuildGetIteratorRecord(spread->expression(), IteratorType::kNormal);
+ LoopBuilder loop_builder(builder(), nullptr, nullptr);
+ loop_builder.LoopHeader();
+
+ // Call the iterator's .next() method. Break from the loop if the `done`
+ // property is truthy, otherwise load the value from the iterator result and
+ // append the argument.
+ BuildIteratorNext(iterator, next_result);
+ builder()->LoadNamedProperty(
+ next_result, ast_string_constants()->done_string(),
+ feedback_index(feedback_spec()->AddLoadICSlot()));
+ loop_builder.BreakIfTrue(ToBooleanMode::kConvertToBoolean);
+
+ loop_builder.LoopBody();
+ builder()
+ ->LoadNamedProperty(next_result, ast_string_constants()->value_string(),
+ feedback_index(feedback_spec()->AddLoadICSlot()))
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kAppendElement, args);
+ loop_builder.BindContinueTarget();
+ loop_builder.JumpToHeader(loop_depth_);
}
void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
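Taken together, the VisitArrayLiteral and BuildArrayLiteralSpread changes split an array literal at its first spread: elements before it keep using the keyed-store path into the cloned boilerplate, and everything from the spread onward is appended element by element. Roughly, in the same pseudocode style as the yield* comment further down (a sketch, not the emitted bytecode):

// array = CreateArrayLiteral(boilerplate)        // constants already filled in
// for each element e before the first spread:
//   if e is not a compile-time constant: array[i] = e   // keyed store IC
// for each element e from the first spread onward:
//   if e is a spread:    for (value of e) %AppendElement(array, value)
//   else if e is a hole: ++array.length                 // leave the hole
//   else:                %AppendElement(array, e)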
@@ -2557,8 +2638,7 @@ void BytecodeGenerator::BuildVariableAssignment(
// TODO(ishell): consider using FeedbackSlotCache for variables here.
FeedbackSlot slot =
feedback_spec()->AddStoreGlobalICSlot(language_mode());
- builder()->StoreGlobal(variable->raw_name(), feedback_index(slot),
- language_mode());
+ builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));
break;
}
case VariableLocation::CONTEXT: {
@@ -2787,7 +2867,7 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
// accumulator. When the generator is resumed, the sent value is loaded in the
// accumulator.
void BytecodeGenerator::BuildSuspendPoint(int suspend_id) {
- RegisterList registers(0, register_allocator()->next_register_index());
+ RegisterList registers = register_allocator()->AllLiveRegisters();
// Save context, registers, and state. Then return.
builder()->SuspendGenerator(generator_object(), registers, suspend_id);
@@ -2798,19 +2878,10 @@ void BytecodeGenerator::BuildSuspendPoint(int suspend_id) {
// Upon resume, we continue here.
builder()->Bind(generator_jump_table_, suspend_id);
- // Clobbers all registers.
- builder()->RestoreGeneratorRegisters(generator_object(), registers);
-
- // Update state to indicate that we have finished resuming. Loop headers
- // rely on this.
- builder()
- ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
- .StoreAccumulatorInRegister(generator_state_);
-
- // When resuming execution of a generator, module or async function, the sent
- // value is in the [[input_or_debug_pos]] slot.
- builder()->CallRuntime(Runtime::kInlineGeneratorGetInputOrDebugPos,
- generator_object());
+ // Clobbers all registers, updating the state to indicate that we have
+ // finished resuming and setting the accumulator to the [[input_or_debug_pos]]
+ // slot of the generator object.
+ builder()->ResumeGenerator(generator_object(), generator_state_, registers);
}
void BytecodeGenerator::VisitYield(Yield* expr) {
@@ -2903,7 +2974,9 @@ void BytecodeGenerator::VisitYield(Yield* expr) {
//
// let output; // uninitialized
//
-// let iterator = GetIterator(iterable);
+// let iteratorRecord = GetIterator(iterable);
+// let iterator = iteratorRecord.[[Iterator]];
+// let next = iteratorRecord.[[NextMethod]];
// let input = undefined;
// let resumeMode = kNext;
//
@@ -2912,25 +2985,25 @@ void BytecodeGenerator::VisitYield(Yield* expr) {
// // Forward input according to resumeMode and obtain output.
// switch (resumeMode) {
// case kNext:
-// output = iterator.next(input);
+//       output = next.[[Call]](iterator, « »);
// break;
// case kReturn:
// let iteratorReturn = iterator.return;
// if (IS_NULL_OR_UNDEFINED(iteratorReturn)) return input;
-// output = %_Call(iteratorReturn, iterator, input);
+// output = iteratorReturn.[[Call]](iterator, «input»);
// break;
// case kThrow:
// let iteratorThrow = iterator.throw;
// if (IS_NULL_OR_UNDEFINED(iteratorThrow)) {
// let iteratorReturn = iterator.return;
// if (!IS_NULL_OR_UNDEFINED(iteratorReturn)) {
-// output = %_Call(iteratorReturn, iterator);
+// output = iteratorReturn.[[Call]](iterator, « »);
// if (IS_ASYNC_GENERATOR) output = await output;
// if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
// }
// throw MakeTypeError(kThrowMethodMissing);
// }
-// output = %_Call(iteratorThrow, iterator, input);
+// output = iteratorThrow.[[Call]](iterator, «input»);
// break;
// }
//
@@ -2963,13 +3036,12 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
{
RegisterAllocationScope register_scope(this);
-
RegisterList iterator_and_input = register_allocator()->NewRegisterList(2);
+ IteratorRecord iterator = BuildGetIteratorRecord(
+ expr->expression(),
+ register_allocator()->NewRegister() /* next method */,
+ iterator_and_input[0], iterator_type);
- Register iterator = iterator_and_input[0];
-
- BuildGetIterator(expr->expression(), iterator_type);
- builder()->StoreAccumulatorInRegister(iterator);
Register input = iterator_and_input[1];
builder()->LoadUndefined().StoreAccumulatorInRegister(input);
builder()
@@ -3000,109 +3072,46 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
// {JSGeneratorObject::kNext} in this case.
STATIC_ASSERT(JSGeneratorObject::kNext == 0);
{
- RegisterAllocationScope register_scope(this);
- // output = iterator.next(input);
- Register iterator_next = register_allocator()->NewRegister();
- FeedbackSlot load_slot = feedback_spec()->AddLoadICSlot();
- FeedbackSlot call_slot = feedback_spec()->AddCallICSlot();
- builder()
- ->LoadNamedProperty(iterator,
- ast_string_constants()->next_string(),
- feedback_index(load_slot))
- .StoreAccumulatorInRegister(iterator_next)
- .CallProperty(iterator_next, iterator_and_input,
- feedback_index(call_slot))
- .Jump(after_switch.New());
+ FeedbackSlot slot = feedback_spec()->AddCallICSlot();
+ builder()->CallProperty(iterator.next(), iterator_and_input,
+ feedback_index(slot));
+ builder()->Jump(after_switch.New());
}
STATIC_ASSERT(JSGeneratorObject::kReturn == 1);
builder()->Bind(switch_jump_table, JSGeneratorObject::kReturn);
{
- RegisterAllocationScope register_scope(this);
- BytecodeLabels return_input(zone());
- // Trigger return from within the inner iterator.
- Register iterator_return = register_allocator()->NewRegister();
- FeedbackSlot load_slot = feedback_spec()->AddLoadICSlot();
- FeedbackSlot call_slot = feedback_spec()->AddCallICSlot();
- builder()
- ->LoadNamedProperty(iterator,
- ast_string_constants()->return_string(),
- feedback_index(load_slot))
- .JumpIfUndefined(return_input.New())
- .JumpIfNull(return_input.New())
- .StoreAccumulatorInRegister(iterator_return)
- .CallProperty(iterator_return, iterator_and_input,
- feedback_index(call_slot))
- .Jump(after_switch.New());
-
- return_input.Bind(builder());
- {
- builder()->LoadAccumulatorWithRegister(input);
- if (iterator_type == IteratorType::kAsync) {
- execution_control()->AsyncReturnAccumulator();
- } else {
- execution_control()->ReturnAccumulator();
- }
+ const AstRawString* return_string =
+ ast_string_constants()->return_string();
+ BytecodeLabels no_return_method(zone());
+
+ BuildCallIteratorMethod(iterator.object(), return_string,
+ iterator_and_input, after_switch.New(),
+ &no_return_method);
+ no_return_method.Bind(builder());
+ builder()->LoadAccumulatorWithRegister(input);
+ if (iterator_type == IteratorType::kAsync) {
+ execution_control()->AsyncReturnAccumulator();
+ } else {
+ execution_control()->ReturnAccumulator();
}
}
STATIC_ASSERT(JSGeneratorObject::kThrow == 2);
builder()->Bind(switch_jump_table, JSGeneratorObject::kThrow);
{
- BytecodeLabels iterator_throw_is_undefined(zone());
- {
- RegisterAllocationScope register_scope(this);
- // If the inner iterator has a throw method, use it to trigger an
- // exception inside.
- Register iterator_throw = register_allocator()->NewRegister();
- FeedbackSlot load_slot = feedback_spec()->AddLoadICSlot();
- FeedbackSlot call_slot = feedback_spec()->AddCallICSlot();
- builder()
- ->LoadNamedProperty(iterator,
- ast_string_constants()->throw_string(),
- feedback_index(load_slot))
- .JumpIfUndefined(iterator_throw_is_undefined.New())
- .JumpIfNull(iterator_throw_is_undefined.New())
- .StoreAccumulatorInRegister(iterator_throw);
- builder()
- ->CallProperty(iterator_throw, iterator_and_input,
- feedback_index(call_slot))
- .Jump(after_switch.New());
- }
-
- iterator_throw_is_undefined.Bind(builder());
- {
- RegisterAllocationScope register_scope(this);
- BytecodeLabels throw_throw_method_missing(zone());
- Register iterator_return = register_allocator()->NewRegister();
- // If iterator.throw does not exist, try to use iterator.return to
- // inform the iterator that it should stop.
- FeedbackSlot load_slot = feedback_spec()->AddLoadICSlot();
- FeedbackSlot call_slot = feedback_spec()->AddCallICSlot();
- builder()
- ->LoadNamedProperty(iterator,
- ast_string_constants()->return_string(),
- feedback_index(load_slot))
- .StoreAccumulatorInRegister(iterator_return);
- builder()
- ->JumpIfUndefined(throw_throw_method_missing.New())
- .JumpIfNull(throw_throw_method_missing.New())
- .CallProperty(iterator_return, RegisterList(iterator),
- feedback_index(call_slot));
-
- if (iterator_type == IteratorType::kAsync) {
- // For async generators, await the result of the .return() call.
- BuildAwait(expr->await_iterator_close_suspend_id());
- builder()->StoreAccumulatorInRegister(output);
- }
-
- builder()
- ->JumpIfJSReceiver(throw_throw_method_missing.New())
- .CallRuntime(Runtime::kThrowIteratorResultNotAnObject, output);
-
- throw_throw_method_missing.Bind(builder());
- builder()->CallRuntime(Runtime::kThrowThrowMethodMissing);
- }
+ const AstRawString* throw_string =
+ ast_string_constants()->throw_string();
+ BytecodeLabels no_throw_method(zone());
+ BuildCallIteratorMethod(iterator.object(), throw_string,
+ iterator_and_input, after_switch.New(),
+ &no_throw_method);
+
+ // If there is no "throw" method, perform IteratorClose, and finally
+ // throw a TypeError.
+ no_throw_method.Bind(builder());
+ BuildIteratorClose(iterator, expr->await_iterator_close_suspend_id());
+ builder()->CallRuntime(Runtime::kThrowThrowMethodMissing);
}
after_switch.Bind(builder());
@@ -3355,6 +3364,11 @@ void BytecodeGenerator::VisitProperty(Property* expr) {
}
}
+void BytecodeGenerator::VisitResolvedProperty(ResolvedProperty* expr) {
+ // Handled by VisitCall().
+ UNREACHABLE();
+}
+
void BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args,
RegisterList* arg_regs) {
// Visit arguments.
@@ -3397,6 +3411,13 @@ void BytecodeGenerator::VisitCall(Call* expr) {
VisitPropertyLoadForRegister(args.last_register(), property, callee);
break;
}
+ case Call::RESOLVED_PROPERTY_CALL: {
+ ResolvedProperty* resolved = callee_expr->AsResolvedProperty();
+ VisitAndPushIntoRegisterList(resolved->object(), &args);
+ VisitForAccumulatorValue(resolved->property());
+ builder()->StoreAccumulatorInRegister(callee);
+ break;
+ }
case Call::GLOBAL_CALL: {
// Receiver is undefined for global calls.
if (!is_spread_call) {
@@ -3421,16 +3442,15 @@ void BytecodeGenerator::VisitCall(Call* expr) {
Register name = register_allocator()->NewRegister();
// Call %LoadLookupSlotForCall to get the callee and receiver.
- DCHECK(Register::AreContiguous(callee, receiver));
- RegisterList result_pair(callee.index(), 2);
- USE(receiver);
-
+ RegisterList result_pair = register_allocator()->NewRegisterList(2);
Variable* variable = callee_expr->AsVariableProxy()->var();
builder()
->LoadLiteral(variable->raw_name())
.StoreAccumulatorInRegister(name)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, name,
- result_pair);
+ result_pair)
+ .MoveRegister(result_pair[0], callee)
+ .MoveRegister(result_pair[1], receiver);
}
break;
}
@@ -3506,7 +3526,8 @@ void BytecodeGenerator::VisitCall(Call* expr) {
DCHECK(!implicit_undefined_receiver);
builder()->CallWithSpread(callee, args, feedback_slot_index);
} else if (call_type == Call::NAMED_PROPERTY_CALL ||
- call_type == Call::KEYED_PROPERTY_CALL) {
+ call_type == Call::KEYED_PROPERTY_CALL ||
+ call_type == Call::RESOLVED_PROPERTY_CALL) {
DCHECK(!implicit_undefined_receiver);
builder()->CallProperty(callee, args, feedback_slot_index);
} else if (implicit_undefined_receiver) {
@@ -4075,6 +4096,91 @@ void BytecodeGenerator::BuildGetIterator(Expression* iterable,
}
}
+// Returns an IteratorRecord which is valid for the lifetime of the current
+// register_allocation_scope.
+BytecodeGenerator::IteratorRecord BytecodeGenerator::BuildGetIteratorRecord(
+ Expression* iterable, Register next, Register object, IteratorType hint) {
+ DCHECK(next.is_valid() && object.is_valid());
+ BuildGetIterator(iterable, hint);
+
+ builder()
+ ->StoreAccumulatorInRegister(object)
+ .LoadNamedProperty(object, ast_string_constants()->next_string(),
+ feedback_index(feedback_spec()->AddLoadICSlot()))
+ .StoreAccumulatorInRegister(next);
+ return IteratorRecord(object, next, hint);
+}
+
+BytecodeGenerator::IteratorRecord BytecodeGenerator::BuildGetIteratorRecord(
+ Expression* iterable, IteratorType hint) {
+ Register next = register_allocator()->NewRegister();
+ Register object = register_allocator()->NewRegister();
+ return BuildGetIteratorRecord(iterable, next, object, hint);
+}
+
+void BytecodeGenerator::BuildIteratorNext(const IteratorRecord& iterator,
+ Register next_result) {
+ DCHECK(next_result.is_valid());
+ builder()->CallProperty(iterator.next(), RegisterList(iterator.object()),
+ feedback_index(feedback_spec()->AddCallICSlot()));
+
+ // TODO(caitp): support async IteratorNext here.
+
+ BytecodeLabel is_object;
+ builder()
+ ->StoreAccumulatorInRegister(next_result)
+ .JumpIfJSReceiver(&is_object)
+ .CallRuntime(Runtime::kThrowIteratorResultNotAnObject, next_result)
+ .Bind(&is_object);
+}
+
+void BytecodeGenerator::BuildCallIteratorMethod(Register iterator,
+ const AstRawString* method_name,
+ RegisterList receiver_and_args,
+ BytecodeLabel* if_called,
+ BytecodeLabels* if_notcalled) {
+ RegisterAllocationScope register_scope(this);
+
+ Register method = register_allocator()->NewRegister();
+ FeedbackSlot slot = feedback_spec()->AddLoadICSlot();
+ builder()
+ ->LoadNamedProperty(iterator, method_name, feedback_index(slot))
+ .JumpIfUndefined(if_notcalled->New())
+ .JumpIfNull(if_notcalled->New())
+ .StoreAccumulatorInRegister(method)
+ .CallProperty(method, receiver_and_args,
+ feedback_index(feedback_spec()->AddCallICSlot()))
+ .Jump(if_called);
+}
+
+void BytecodeGenerator::BuildIteratorClose(const IteratorRecord& iterator,
+ int suspend_id) {
+ RegisterAllocationScope register_scope(this);
+ BytecodeLabels done(zone());
+ BytecodeLabel if_called;
+ RegisterList args = RegisterList(iterator.object());
+ BuildCallIteratorMethod(iterator.object(),
+ ast_string_constants()->return_string(), args,
+ &if_called, &done);
+ builder()->Bind(&if_called);
+
+ if (iterator.type() == IteratorType::kAsync) {
+ DCHECK_GE(suspend_id, 0);
+ BuildAwait(suspend_id);
+ }
+
+ builder()->JumpIfJSReceiver(done.New());
+ {
+ RegisterAllocationScope register_scope(this);
+ Register return_result = register_allocator()->NewRegister();
+ builder()
+ ->StoreAccumulatorInRegister(return_result)
+ .CallRuntime(Runtime::kThrowIteratorResultNotAnObject, return_result);
+ }
+
+ done.Bind(builder());
+}
+
void BytecodeGenerator::VisitGetIterator(GetIterator* expr) {
builder()->SetExpressionPosition(expr);
BuildGetIterator(expr->iterable(), expr->hint());
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 9b7b572db3..f9de9550fe 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -56,6 +56,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
class EffectResultScope;
class FeedbackSlotCache;
class GlobalDeclarationsBuilder;
+ class IteratorRecord;
class NaryCodeCoverageSlots;
class RegisterAllocationScope;
class TestResultScope;
@@ -151,6 +152,26 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildGetIterator(Expression* iterable, IteratorType hint);
+ // Create an IteratorRecord with pre-allocated registers holding the next
+ // method and iterator object.
+ IteratorRecord BuildGetIteratorRecord(Expression* iterable,
+ Register iterator_next,
+ Register iterator_object,
+ IteratorType hint);
+
+ // Create an IteratorRecord allocating new registers to hold the next method
+ // and iterator object.
+ IteratorRecord BuildGetIteratorRecord(Expression* iterable,
+ IteratorType hint);
+ void BuildIteratorNext(const IteratorRecord& iterator, Register next_result);
+ void BuildIteratorClose(const IteratorRecord& iterator, int suspend_id = -1);
+ void BuildCallIteratorMethod(Register iterator, const AstRawString* method,
+ RegisterList receiver_and_args,
+ BytecodeLabel* if_called,
+ BytecodeLabels* if_notcalled);
+
+ void BuildArrayLiteralSpread(Spread* spread, Register array);
+
void AllocateTopLevelRegisters();
void VisitArgumentsObject(Variable* variable);
void VisitRestArgumentsArray(Variable* rest);
diff --git a/deps/v8/src/interpreter/bytecode-register-allocator.h b/deps/v8/src/interpreter/bytecode-register-allocator.h
index ff335d6f20..8509bd43e0 100644
--- a/deps/v8/src/interpreter/bytecode-register-allocator.h
+++ b/deps/v8/src/interpreter/bytecode-register-allocator.h
@@ -87,6 +87,11 @@ class BytecodeRegisterAllocator final {
return reg.index() < next_register_index_;
}
+ // Returns a register list for all currently live registers.
+ RegisterList AllLiveRegisters() const {
+ return RegisterList(0, next_register_index());
+ }
+
void set_observer(Observer* observer) { observer_ = observer; }
int next_register_index() const { return next_register_index_; }
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.cc b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
index 94dc930920..d75e45967b 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
@@ -448,7 +448,7 @@ RegisterList BytecodeRegisterOptimizer::GetInputRegisterList(
if (reg_list.register_count() == 1) {
// If there is only a single register, treat it as a normal input register.
Register reg(GetInputRegister(reg_list.first_register()));
- return RegisterList(reg.index(), 1);
+ return RegisterList(reg);
} else {
int start_index = reg_list.first_register().index();
for (int i = 0; i < reg_list.register_count(); ++i) {
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h
index fababcf19e..92673d9cac 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h
@@ -67,7 +67,7 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
if (Bytecodes::IsJump(bytecode) || Bytecodes::IsSwitch(bytecode) ||
bytecode == Bytecode::kDebugger ||
bytecode == Bytecode::kSuspendGenerator ||
- bytecode == Bytecode::kRestoreGeneratorRegisters) {
+ bytecode == Bytecode::kResumeGenerator) {
// All state must be flushed before emitting
// - a jump bytecode (as the register equivalents at the jump target
// aren't known)
diff --git a/deps/v8/src/interpreter/bytecode-register.h b/deps/v8/src/interpreter/bytecode-register.h
index 07ed756522..b5420f7e72 100644
--- a/deps/v8/src/interpreter/bytecode-register.h
+++ b/deps/v8/src/interpreter/bytecode-register.h
@@ -61,9 +61,9 @@ class V8_EXPORT_PRIVATE Register final {
}
static bool AreContiguous(Register reg1, Register reg2,
- Register reg3 = Register(),
- Register reg4 = Register(),
- Register reg5 = Register());
+ Register reg3 = invalid_value(),
+ Register reg4 = invalid_value(),
+ Register reg5 = invalid_value());
std::string ToString(int parameter_count) const;
@@ -98,14 +98,11 @@ class V8_EXPORT_PRIVATE Register final {
class RegisterList {
public:
- RegisterList() : first_reg_index_(Register().index()), register_count_(0) {}
- RegisterList(int first_reg_index, int register_count)
- : first_reg_index_(first_reg_index), register_count_(register_count) {}
+ RegisterList()
+ : first_reg_index_(Register::invalid_value().index()),
+ register_count_(0) {}
explicit RegisterList(Register r) : RegisterList(r.index(), 1) {}
- // Increases the size of the register list by one.
- void IncrementRegisterCount() { register_count_++; }
-
// Returns a new RegisterList which is a truncated version of this list, with
// |count| registers.
const RegisterList Truncate(int new_count) {
@@ -130,6 +127,17 @@ class RegisterList {
int register_count() const { return register_count_; }
private:
+ friend class BytecodeRegisterAllocator;
+ friend class BytecodeDecoder;
+ friend class InterpreterTester;
+ friend class BytecodeUtils;
+
+ RegisterList(int first_reg_index, int register_count)
+ : first_reg_index_(first_reg_index), register_count_(register_count) {}
+
+ // Increases the size of the register list by one.
+ void IncrementRegisterCount() { register_count_++; }
+
int first_reg_index_;
int register_count_;
};
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 2d3fc2c96e..ce01566d52 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -42,10 +42,7 @@ namespace interpreter {
V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx, OperandType::kIdx) \
V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx, \
OperandType::kIdx) \
- V(StaGlobalSloppy, AccumulatorUse::kRead, OperandType::kIdx, \
- OperandType::kIdx) \
- V(StaGlobalStrict, AccumulatorUse::kRead, OperandType::kIdx, \
- OperandType::kIdx) \
+ V(StaGlobal, AccumulatorUse::kRead, OperandType::kIdx, OperandType::kIdx) \
\
/* Context operations */ \
V(PushContext, AccumulatorUse::kRead, OperandType::kRegOut) \
@@ -320,8 +317,8 @@ namespace interpreter {
V(RestoreGeneratorState, AccumulatorUse::kWrite, OperandType::kReg) \
V(SuspendGenerator, AccumulatorUse::kNone, OperandType::kReg, \
OperandType::kRegList, OperandType::kRegCount, OperandType::kUImm) \
- V(RestoreGeneratorRegisters, AccumulatorUse::kNone, OperandType::kReg, \
- OperandType::kRegOutList, OperandType::kRegCount) \
+ V(ResumeGenerator, AccumulatorUse::kWrite, OperandType::kReg, \
+ OperandType::kRegOut, OperandType::kRegOutList, OperandType::kRegCount) \
\
/* Debugger */ \
V(Debugger, AccumulatorUse::kNone) \
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index 70b8bc5c1a..47bb955374 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -115,7 +115,7 @@ void ConstantArrayBuilder::ConstantArraySlice::CheckAllElementsAreUnique(
for (const Entry& prev_entry : constants_) {
os << i++ << ": " << Brief(*prev_entry.ToHandle(isolate)) << std::endl;
}
- FATAL(os.str().c_str());
+ FATAL("%s", os.str().c_str());
}
}
}
diff --git a/deps/v8/src/interpreter/handler-table-builder.cc b/deps/v8/src/interpreter/handler-table-builder.cc
index 2ff7f2130a..4b6c44b95d 100644
--- a/deps/v8/src/interpreter/handler-table-builder.cc
+++ b/deps/v8/src/interpreter/handler-table-builder.cc
@@ -34,7 +34,7 @@ Handle<HandlerTable> HandlerTableBuilder::ToHandlerTable(Isolate* isolate) {
int HandlerTableBuilder::NewHandlerEntry() {
int handler_id = static_cast<int>(entries_.size());
- Entry entry = {0, 0, 0, Register(), HandlerTable::UNCAUGHT};
+ Entry entry = {0, 0, 0, Register::invalid_value(), HandlerTable::UNCAUGHT};
entries_.push_back(entry);
return handler_id;
}
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index e4cc104b76..846b69281e 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -577,7 +577,7 @@ void InterpreterAssembler::CallEpilogue() {
Node* stack_pointer_before_call = stack_pointer_before_call_;
stack_pointer_before_call_ = nullptr;
AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
- kUnexpectedStackPointer);
+ AbortReason::kUnexpectedStackPointer);
}
}
@@ -586,7 +586,11 @@ void InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
Comment("increment call count");
Node* call_count =
LoadFeedbackVectorSlot(feedback_vector, slot_id, kPointerSize);
- Node* new_count = SmiAdd(call_count, SmiConstant(1));
+ // The lowest {CallICNexus::CallCountField::kShift} bits of the call
+ // count are used as flags. To increment the call count by 1 we hence
+ // have to increment by 1 << {CallICNexus::CallCountField::kShift}.
+ Node* new_count =
+ SmiAdd(call_count, SmiConstant(1 << CallICNexus::CallCountField::kShift));
// Count is Smi, so we don't need a write barrier.
StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count,
SKIP_WRITE_BARRIER, kPointerSize);
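
The new comment explains that the low CallICNexus::CallCountField::kShift bits of the Smi-encoded call count hold flags, so bumping the logical count means adding 1 << kShift rather than 1. A tiny standalone sketch of that packed layout; the shift width of 2 below is an illustrative assumption, not a value taken from the diff:

#include <cstdint>
#include <cstdio>

// Hypothetical layout: the low 2 bits are flag bits, the rest is the count.
constexpr uint32_t kCallCountShift = 2;  // assumed flag-field width
constexpr uint32_t kFlagMask = (1u << kCallCountShift) - 1;

uint32_t IncrementCallCount(uint32_t packed) {
  // Adding 1 << shift bumps the count without disturbing the flag bits.
  return packed + (1u << kCallCountShift);
}

int main() {
  uint32_t packed = (5u << kCallCountShift) | 0x1;  // count = 5, one flag set
  packed = IncrementCallCount(packed);
  std::printf("count=%u flags=%u\n", packed >> kCallCountShift,
              packed & kFlagMask);  // prints: count=6 flags=1
  return 0;
}
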
@@ -1309,7 +1313,6 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
break;
default:
UNREACHABLE();
- base_index = nullptr;
}
Node* target_index = IntPtrAdd(base_index, next_bytecode);
Node* target_code_entry =
@@ -1350,20 +1353,20 @@ Node* InterpreterAssembler::LoadOSRNestingLevel() {
MachineType::Int8());
}
-void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
+void InterpreterAssembler::Abort(AbortReason abort_reason) {
disable_stack_check_across_call_ = true;
- Node* abort_id = SmiConstant(bailout_reason);
+ Node* abort_id = SmiConstant(abort_reason);
CallRuntime(Runtime::kAbort, GetContext(), abort_id);
disable_stack_check_across_call_ = false;
}
void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
- BailoutReason bailout_reason) {
+ AbortReason abort_reason) {
Label ok(this), abort(this, Label::kDeferred);
Branch(WordEqual(lhs, rhs), &ok, &abort);
BIND(&abort);
- Abort(bailout_reason);
+ Abort(abort_reason);
Goto(&ok);
BIND(&ok);
@@ -1383,7 +1386,7 @@ void InterpreterAssembler::MaybeDropFrames(Node* context) {
// We don't expect this call to return since the frame dropper tears down
// the stack and jumps into the function on the target frame to restart it.
CallStub(CodeFactory::FrameDropperTrampoline(isolate()), context, restart_fp);
- Abort(kUnexpectedReturnFromFrameDropper);
+ Abort(AbortReason::kUnexpectedReturnFromFrameDropper);
Goto(&ok);
BIND(&ok);
@@ -1442,7 +1445,7 @@ void InterpreterAssembler::AbortIfRegisterCountInvalid(Node* register_file,
Branch(UintPtrLessThanOrEqual(register_count, array_size), &ok, &abort);
BIND(&abort);
- Abort(kInvalidRegisterFileInGenerator);
+ Abort(AbortReason::kInvalidRegisterFileInGenerator);
Goto(&ok);
BIND(&ok);
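
These hunks migrate the interpreter assembler from the old unscoped BailoutReason constants to the scoped AbortReason enum, which is why every call site now spells out AbortReason::k... in full. A small sketch of why the scoped form is the safer API; the declarations are simplified, not the real V8 ones:

#include <cstdio>

// A scoped enum keeps its enumerators out of the enclosing namespace and
// refuses implicit conversion from integers, unlike the old unscoped values.
enum class AbortReason : int {
  kUnexpectedStackPointer,
  kInvalidBytecode,
};

void Abort(AbortReason reason) {
  std::printf("abort: %d\n", static_cast<int>(reason));
}

int main() {
  Abort(AbortReason::kInvalidBytecode);  // call sites must qualify the constant
  // Abort(1);                           // would not compile: no implicit conversion
  return 0;
}
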
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 2b38508441..63d1709145 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -225,10 +225,10 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Dispatch bytecode as wide operand variant.
void DispatchWide(OperandScale operand_scale);
- // Abort with the given bailout reason.
- void Abort(BailoutReason bailout_reason);
+ // Abort with the given abort reason.
+ void Abort(AbortReason abort_reason);
void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
- BailoutReason bailout_reason);
+ AbortReason abort_reason);
// Abort if |register_count| is invalid for given register file array.
void AbortIfRegisterCountInvalid(compiler::Node* register_file,
compiler::Node* register_count);
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 1665aff29b..5dabc13ea0 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -158,68 +158,29 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
void LdaGlobal(int slot_operand_index, int name_operand_index,
TypeofMode typeof_mode) {
- // Must be kept in sync with AccessorAssembler::LoadGlobalIC.
-
- // Load the global via the LoadGlobalIC.
- Node* feedback_vector = LoadFeedbackVector();
+ TNode<FeedbackVector> feedback_vector = CAST(LoadFeedbackVector());
Node* feedback_slot = BytecodeOperandIdx(slot_operand_index);
AccessorAssembler accessor_asm(state());
+ Label done(this);
+ Variable var_result(this, MachineRepresentation::kTagged);
+ ExitPoint exit_point(this, &done, &var_result);
- Label try_handler(this, Label::kDeferred), miss(this, Label::kDeferred);
-
- // Fast path without frame construction for the data case.
- {
- Label done(this);
- Variable var_result(this, MachineRepresentation::kTagged);
- ExitPoint exit_point(this, &done, &var_result);
+ LazyNode<Context> lazy_context = [=] { return CAST(GetContext()); };
- accessor_asm.LoadGlobalIC_TryPropertyCellCase(
- feedback_vector, feedback_slot, &exit_point, &try_handler, &miss,
- CodeStubAssembler::INTPTR_PARAMETERS);
-
- BIND(&done);
- SetAccumulator(var_result.value());
- Dispatch();
- }
-
- // Slow path with frame construction.
- {
- Label done(this);
- Variable var_result(this, MachineRepresentation::kTagged);
- ExitPoint exit_point(this, &done, &var_result);
-
- BIND(&try_handler);
- {
- Node* context = GetContext();
- Node* smi_slot = SmiTag(feedback_slot);
- Node* name_index = BytecodeOperandIdx(name_operand_index);
- Node* name = LoadConstantPoolEntry(name_index);
-
- AccessorAssembler::LoadICParameters params(context, nullptr, name,
- smi_slot, feedback_vector);
- accessor_asm.LoadGlobalIC_TryHandlerCase(&params, typeof_mode,
- &exit_point, &miss);
- }
+ LazyNode<Name> lazy_name = [=] {
+ Node* name_index = BytecodeOperandIdx(name_operand_index);
+ Node* name = LoadConstantPoolEntry(name_index);
+ return CAST(name);
+ };
- BIND(&miss);
- {
- Node* context = GetContext();
- Node* smi_slot = SmiTag(feedback_slot);
- Node* name_index = BytecodeOperandIdx(name_operand_index);
- Node* name = LoadConstantPoolEntry(name_index);
-
- AccessorAssembler::LoadICParameters params(context, nullptr, name,
- smi_slot, feedback_vector);
- accessor_asm.LoadGlobalIC_MissCase(&params, &exit_point);
- }
+ accessor_asm.LoadGlobalIC(feedback_vector, feedback_slot, lazy_context,
+ lazy_name, typeof_mode, &exit_point,
+ CodeStubAssembler::INTPTR_PARAMETERS);
- BIND(&done);
- {
- SetAccumulator(var_result.value());
- Dispatch();
- }
- }
+ BIND(&done);
+ SetAccumulator(var_result.value());
+ Dispatch();
}
};
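
LdaGlobal now hands AccessorAssembler::LoadGlobalIC two lazy callbacks (lazy_context, lazy_name), so the context and constant-pool name are only materialized on the slow paths that actually need them. A standalone sketch of that deferred-computation shape; std::function stands in for the LazyNode template here:

#include <cstdio>
#include <functional>
#include <string>

// A "lazy node": the value is only produced if some path actually asks for it.
using LazyString = std::function<std::string()>;

void LoadGlobal(bool fast_path, const LazyString& lazy_name) {
  if (fast_path) {
    std::printf("fast path: the name was never materialized\n");
    return;
  }
  // Slow path: evaluate the callback now, exactly where it is needed.
  std::printf("slow path: loading %s\n", lazy_name().c_str());
}

int main() {
  LazyString lazy_name = [] {
    std::printf("(performing the constant-pool lookup)\n");
    return std::string("someGlobalName");
  };
  LoadGlobal(true, lazy_name);   // the callback never runs
  LoadGlobal(false, lazy_name);  // the callback runs once, on demand
  return 0;
}
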
@@ -245,50 +206,23 @@ IGNITION_HANDLER(LdaGlobalInsideTypeof, InterpreterLoadGlobalAssembler) {
LdaGlobal(kSlotOperandIndex, kNameOperandIndex, INSIDE_TYPEOF);
}
-class InterpreterStoreGlobalAssembler : public InterpreterAssembler {
- public:
- InterpreterStoreGlobalAssembler(CodeAssemblerState* state, Bytecode bytecode,
- OperandScale operand_scale)
- : InterpreterAssembler(state, bytecode, operand_scale) {}
-
- void StaGlobal(Callable ic) {
- // Get the global object.
- Node* context = GetContext();
- Node* native_context = LoadNativeContext(context);
- Node* global = LoadContextElement(native_context, Context::EXTENSION_INDEX);
-
- // Store the global via the StoreIC.
- Node* code_target = HeapConstant(ic.code());
- Node* constant_index = BytecodeOperandIdx(0);
- Node* name = LoadConstantPoolEntry(constant_index);
- Node* value = GetAccumulator();
- Node* raw_slot = BytecodeOperandIdx(1);
- Node* smi_slot = SmiTag(raw_slot);
- Node* feedback_vector = LoadFeedbackVector();
- CallStub(ic.descriptor(), code_target, context, global, name, value,
- smi_slot, feedback_vector);
- Dispatch();
- }
-};
-
-// StaGlobalSloppy <name_index> <slot>
+// StaGlobal <name_index> <slot>
//
// Store the value in the accumulator into the global with name in constant pool
-// entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
-IGNITION_HANDLER(StaGlobalSloppy, InterpreterStoreGlobalAssembler) {
- Callable ic = CodeFactory::StoreGlobalICInOptimizedCode(
- isolate(), LanguageMode::kSloppy);
- StaGlobal(ic);
-}
+// entry <name_index> using FeedbackVector slot <slot>.
+// entry <name_index> using FeedbackVector slot <slot>.
+IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
+ Node* context = GetContext();
-// StaGlobalStrict <name_index> <slot>
-//
-// Store the value in the accumulator into the global with name in constant pool
-// entry <name_index> using FeedBackVector slot <slot> in strict mode.
-IGNITION_HANDLER(StaGlobalStrict, InterpreterStoreGlobalAssembler) {
- Callable ic = CodeFactory::StoreGlobalICInOptimizedCode(
- isolate(), LanguageMode::kStrict);
- StaGlobal(ic);
+ // Store the global via the StoreGlobalIC.
+ Node* constant_index = BytecodeOperandIdx(0);
+ Node* name = LoadConstantPoolEntry(constant_index);
+ Node* value = GetAccumulator();
+ Node* raw_slot = BytecodeOperandIdx(1);
+ Node* smi_slot = SmiTag(raw_slot);
+ Node* feedback_vector = LoadFeedbackVector();
+ Callable ic = Builtins::CallableFor(isolate(), Builtins::kStoreGlobalIC);
+ CallStub(ic, context, name, value, smi_slot, feedback_vector);
+ Dispatch();
}
// LdaContextSlot <context> <slot_index> <depth>
@@ -802,7 +736,7 @@ IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) {
BIND(&if_import);
{
// Not supported (probably never).
- Abort(kUnsupportedModuleOperation);
+ Abort(AbortReason::kUnsupportedModuleOperation);
Goto(&end);
}
@@ -1245,8 +1179,7 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
BIND(&if_bigint);
{
var_result.Bind(BigIntOp(value));
- CombineFeedback(&var_feedback,
- SmiConstant(BinaryOperationFeedback::kBigInt));
+ CombineFeedback(&var_feedback, BinaryOperationFeedback::kBigInt);
Goto(&end);
}
@@ -1257,8 +1190,8 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
// only reach this path on the first pass when the feedback is kNone.
CSA_ASSERT(this, SmiEqual(var_feedback.value(),
SmiConstant(BinaryOperationFeedback::kNone)));
- var_feedback.Bind(
- SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
+ OverwriteFeedback(&var_feedback,
+ BinaryOperationFeedback::kNumberOrOddball);
var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
Goto(&start);
}
@@ -1270,7 +1203,7 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
// only reach this path on the first pass when the feedback is kNone.
CSA_ASSERT(this, SmiEqual(var_feedback.value(),
SmiConstant(BinaryOperationFeedback::kNone)));
- var_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
+ OverwriteFeedback(&var_feedback, BinaryOperationFeedback::kAny);
var_value.Bind(
CallBuiltin(Builtins::kNonNumberToNumeric, GetContext(), value));
Goto(&start);
@@ -1279,8 +1212,7 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
BIND(&do_float_op);
{
- CombineFeedback(&var_feedback,
- SmiConstant(BinaryOperationFeedback::kNumber));
+ CombineFeedback(&var_feedback, BinaryOperationFeedback::kNumber);
var_result.Bind(
AllocateHeapNumberWithValue(FloatOp(var_float_value.value())));
Goto(&end);
@@ -1310,14 +1242,12 @@ class NegateAssemblerImpl : public UnaryNumericOpAssembler {
GotoIf(SmiEqual(smi_value, SmiConstant(Smi::kMinValue)), &if_min_smi);
// Else simply subtract operand from 0.
- CombineFeedback(var_feedback,
- SmiConstant(BinaryOperationFeedback::kSignedSmall));
+ CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
var_result.Bind(SmiSub(SmiConstant(0), smi_value));
Goto(&end);
BIND(&if_zero);
- CombineFeedback(var_feedback,
- SmiConstant(BinaryOperationFeedback::kNumber));
+ CombineFeedback(var_feedback, BinaryOperationFeedback::kNumber);
var_result.Bind(MinusZeroConstant());
Goto(&end);
@@ -1412,8 +1342,7 @@ class IncDecAssembler : public UnaryNumericOpAssembler {
}
BIND(&if_notoverflow);
- CombineFeedback(var_feedback,
- SmiConstant(BinaryOperationFeedback::kSignedSmall));
+ CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
return BitcastWordToTaggedSigned(Projection(0, pair));
}
@@ -2076,11 +2005,11 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
GotoIf(TaggedIsSmi(object), &if_false);
// Check if callable bit is set and not undetectable.
Node* map_bitfield = LoadMapBitField(LoadMap(object));
- Node* callable_undetectable = Word32And(
- map_bitfield,
- Int32Constant(1 << Map::kIsUndetectable | 1 << Map::kIsCallable));
+ Node* callable_undetectable =
+ Word32And(map_bitfield, Int32Constant(Map::IsUndetectableBit::kMask |
+ Map::IsCallableBit::kMask));
Branch(Word32Equal(callable_undetectable,
- Int32Constant(1 << Map::kIsCallable)),
+ Int32Constant(Map::IsCallableBit::kMask)),
&if_true, &if_false);
}
BIND(&if_object);
@@ -2095,9 +2024,9 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
Node* map = LoadMap(object);
GotoIfNot(IsJSReceiverMap(map), &if_false);
Node* map_bitfield = LoadMapBitField(map);
- Node* callable_undetectable = Word32And(
- map_bitfield,
- Int32Constant(1 << Map::kIsUndetectable | 1 << Map::kIsCallable));
+ Node* callable_undetectable =
+ Word32And(map_bitfield, Int32Constant(Map::IsUndetectableBit::kMask |
+ Map::IsCallableBit::kMask));
Branch(Word32Equal(callable_undetectable, Int32Constant(0)), &if_true,
&if_false);
}
@@ -2798,7 +2727,7 @@ IGNITION_HANDLER(Throw, InterpreterAssembler) {
Node* context = GetContext();
CallRuntime(Runtime::kThrow, context, exception);
// We shouldn't ever return from a throw.
- Abort(kUnexpectedReturnFromThrow);
+ Abort(AbortReason::kUnexpectedReturnFromThrow);
}
// ReThrow
@@ -2809,10 +2738,10 @@ IGNITION_HANDLER(ReThrow, InterpreterAssembler) {
Node* context = GetContext();
CallRuntime(Runtime::kReThrow, context, exception);
// We shouldn't ever return from a throw.
- Abort(kUnexpectedReturnFromThrow);
+ Abort(AbortReason::kUnexpectedReturnFromThrow);
}
-// Abort <bailout_reason>
+// Abort <abort_reason>
//
// Aborts execution (via a call to the runtime function).
IGNITION_HANDLER(Abort, InterpreterAssembler) {
@@ -2845,7 +2774,7 @@ IGNITION_HANDLER(ThrowReferenceErrorIfHole, InterpreterAssembler) {
Node* name = LoadConstantPoolEntry(BytecodeOperandIdx(0));
CallRuntime(Runtime::kThrowReferenceError, GetContext(), name);
// We shouldn't ever return from a throw.
- Abort(kUnexpectedReturnFromThrow);
+ Abort(AbortReason::kUnexpectedReturnFromThrow);
}
}
@@ -2863,7 +2792,7 @@ IGNITION_HANDLER(ThrowSuperNotCalledIfHole, InterpreterAssembler) {
{
CallRuntime(Runtime::kThrowSuperNotCalled, GetContext());
// We shouldn't ever return from a throw.
- Abort(kUnexpectedReturnFromThrow);
+ Abort(AbortReason::kUnexpectedReturnFromThrow);
}
}
@@ -2882,7 +2811,7 @@ IGNITION_HANDLER(ThrowSuperAlreadyCalledIfNotHole, InterpreterAssembler) {
{
CallRuntime(Runtime::kThrowSuperAlreadyCalledError, GetContext());
// We shouldn't ever return from a throw.
- Abort(kUnexpectedReturnFromThrow);
+ Abort(AbortReason::kUnexpectedReturnFromThrow);
}
}
@@ -3140,7 +3069,9 @@ IGNITION_HANDLER(ExtraWide, InterpreterAssembler) {
// Illegal
//
// An invalid bytecode aborting execution if dispatched.
-IGNITION_HANDLER(Illegal, InterpreterAssembler) { Abort(kInvalidBytecode); }
+IGNITION_HANDLER(Illegal, InterpreterAssembler) {
+ Abort(AbortReason::kInvalidBytecode);
+}
// SuspendGenerator <generator> <first input register> <register count>
// <suspend_id>
@@ -3212,18 +3143,20 @@ IGNITION_HANDLER(RestoreGeneratorState, InterpreterAssembler) {
Dispatch();
}
-// RestoreGeneratorRegisters <generator> <first output register> <register
-// count>
+// ResumeGenerator <generator> <generator_state> <first output
+// register> <register count>
//
-// Imports the register file stored in the generator.
-IGNITION_HANDLER(RestoreGeneratorRegisters, InterpreterAssembler) {
+// Imports the register file stored in the generator and marks the generator
+// state as executing.
+IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) {
Node* generator_reg = BytecodeOperandReg(0);
- // Bytecode operand 1 is the start register. It should always be 0, so let's
+ Node* generator_state_reg = BytecodeOperandReg(1);
+ // Bytecode operand 2 is the start register. It should always be 0, so let's
// ignore it.
- CSA_ASSERT(this, WordEqual(BytecodeOperandReg(1),
+ CSA_ASSERT(this, WordEqual(BytecodeOperandReg(2),
IntPtrConstant(Register(0).ToOperand())));
- // Bytecode operand 2 is the number of registers to store to the generator.
- Node* register_count = ChangeUint32ToWord(BytecodeOperandCount(2));
+ // Bytecode operand 3 is the number of registers to store to the generator.
+ Node* register_count = ChangeUint32ToWord(BytecodeOperandCount(3));
Node* generator = LoadRegister(generator_reg);
@@ -3231,6 +3164,15 @@ IGNITION_HANDLER(RestoreGeneratorRegisters, InterpreterAssembler) {
LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset),
register_count);
+ // Since we're resuming, update the generator state to indicate that the
+ // generator is now executing.
+ StoreRegister(SmiConstant(JSGeneratorObject::kGeneratorExecuting),
+ generator_state_reg);
+
+ // Return the generator's input_or_debug_pos in the accumulator.
+ SetAccumulator(
+ LoadObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset));
+
Dispatch();
}
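
The renamed ResumeGenerator handler restores the register file, writes the executing sentinel into the generator-state register, and leaves input_or_debug_pos in the accumulator. A plain-struct sketch of that resume sequence; the state values below are illustrative, not the real JSGeneratorObject constants:

#include <cstdio>
#include <vector>

// Illustrative state values; the real constants live on JSGeneratorObject.
enum GeneratorState { kSuspended = 0, kExecuting = -1 };

struct Generator {
  std::vector<int> register_file;  // registers saved at the last suspend
  int input_or_debug_pos = 42;     // the value passed to .next(value)
};

struct Frame {
  std::vector<int> registers;
  int state_register = kSuspended;
  int accumulator = 0;
};

void ResumeGenerator(const Generator& gen, Frame* frame) {
  frame->registers = gen.register_file;         // import the register file
  frame->state_register = kExecuting;           // mark the generator as running
  frame->accumulator = gen.input_or_debug_pos;  // resume value lands in the accumulator
}

int main() {
  Generator gen{{1, 2, 3}, 7};
  Frame frame;
  ResumeGenerator(gen, &frame);
  std::printf("state=%d acc=%d regs=%zu\n", frame.state_register,
              frame.accumulator, frame.registers.size());
  return 0;
}
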
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index 39cb45c96c..7ad8d49b63 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -107,7 +107,7 @@ Node* IntrinsicsGenerator::InvokeIntrinsic(Node* function_id, Node* context,
__ BIND(&abort);
{
- __ Abort(BailoutReason::kUnexpectedFunctionIDForInvokeIntrinsic);
+ __ Abort(AbortReason::kUnexpectedFunctionIDForInvokeIntrinsic);
result.Bind(__ UndefinedConstant());
__ Goto(&end);
}
@@ -331,7 +331,7 @@ Node* IntrinsicsGenerator::Call(Node* args_reg, Node* arg_count,
InterpreterAssembler::Label arg_count_positive(assembler_);
Node* comparison = __ Int32LessThan(target_args_count, __ Int32Constant(0));
__ GotoIfNot(comparison, &arg_count_positive);
- __ Abort(kWrongArgumentCountForInvokeIntrinsic);
+ __ Abort(AbortReason::kWrongArgumentCountForInvokeIntrinsic);
__ Goto(&arg_count_positive);
__ BIND(&arg_count_positive);
}
@@ -472,7 +472,7 @@ void IntrinsicsGenerator::AbortIfArgCountMismatch(int expected, Node* actual) {
InterpreterAssembler::Label match(assembler_);
Node* comparison = __ Word32Equal(actual, __ Int32Constant(expected));
__ GotoIf(comparison, &match);
- __ Abort(kWrongArgumentCountForInvokeIntrinsic);
+ __ Abort(AbortReason::kWrongArgumentCountForInvokeIntrinsic);
__ Goto(&match);
__ BIND(&match);
}
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 1f359f1a0f..fb74d37df4 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -180,8 +180,8 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
RuntimeCallTimerScope runtimeTimerScope(
parse_info()->runtime_call_stats(),
parse_info()->on_background_thread()
- ? &RuntimeCallStats::CompileBackgroundIgnition
- : &RuntimeCallStats::CompileIgnition);
+ ? RuntimeCallCounterId::kCompileBackgroundIgnition
+ : RuntimeCallCounterId::kCompileIgnition);
// TODO(lpy): add support for background compilation RCS trace.
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileIgnition");
@@ -201,7 +201,7 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
Isolate* isolate) {
RuntimeCallTimerScope runtimeTimerScope(
parse_info()->runtime_call_stats(),
- &RuntimeCallStats::CompileIgnitionFinalization);
+ RuntimeCallCounterId::kCompileIgnitionFinalization);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileIgnitionFinalization");
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index e3ee968f79..7165d88d34 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -54,7 +54,8 @@
#include "src/visitors.h"
#include "src/vm-state-inl.h"
#include "src/wasm/compilation-manager.h"
-#include "src/wasm/wasm-heap.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects.h"
#include "src/zone/accounting-allocator.h"
@@ -109,6 +110,8 @@ void ThreadLocalTop::InitializeInternal() {
rethrowing_message_ = false;
pending_message_obj_ = nullptr;
scheduled_exception_ = nullptr;
+ microtask_queue_bailout_index_ = -1;
+ microtask_queue_bailout_count_ = 0;
}
@@ -332,8 +335,8 @@ void Isolate::PushStackTraceAndDie(unsigned int magic1, void* ptr1, void* ptr2,
"ptr6=%p ptr7=%p ptr8=%p\n\n%s",
magic1, magic2, ptr1, ptr2, ptr3, ptr4, ptr5, ptr6, ptr7, ptr8,
reinterpret_cast<char*>(buffer));
- PushCodeObjectsAndDie(0xdeadc0de, ptr1, ptr2, ptr3, ptr4, ptr5, ptr6, ptr7,
- ptr8, 0xdeadc0de);
+ PushCodeObjectsAndDie(0xDEADC0DE, ptr1, ptr2, ptr3, ptr4, ptr5, ptr6, ptr7,
+ ptr8, 0xDEADC0DE);
}
void Isolate::PushCodeObjectsAndDie(unsigned int magic1, void* ptr1, void* ptr2,
@@ -429,12 +432,12 @@ class FrameArrayBuilder {
//====================================================================
const auto& summary = summ.AsWasmCompiled();
if (!summary.code().IsCodeObject() &&
- summary.code().GetWasmCode()->kind() != wasm::WasmCode::Function) {
+ summary.code().GetWasmCode()->kind() != wasm::WasmCode::kFunction) {
continue;
}
Handle<WasmInstanceObject> instance = summary.wasm_instance();
int flags = 0;
- if (instance->compiled_module()->is_asm_js()) {
+ if (instance->compiled_module()->shared()->is_asm_js()) {
flags |= FrameArray::kIsAsmJsWasmFrame;
if (WasmCompiledFrame::cast(frame)->at_to_number_conversion()) {
flags |= FrameArray::kAsmJsAtNumberConversion;
@@ -453,7 +456,7 @@ class FrameArrayBuilder {
const auto& summary = summ.AsWasmInterpreted();
Handle<WasmInstanceObject> instance = summary.wasm_instance();
int flags = FrameArray::kIsWasmInterpretedFrame;
- DCHECK(!instance->compiled_module()->is_asm_js());
+ DCHECK(!instance->compiled_module()->shared()->is_asm_js());
elements_ = FrameArray::AppendWasmFrame(elements_, instance,
summary.function_index(), {},
summary.byte_offset(), flags);
@@ -667,6 +670,11 @@ Handle<FixedArray> Isolate::GetDetailedStackTrace(
Address Isolate::GetAbstractPC(int* line, int* column) {
JavaScriptFrameIterator it(this);
+ if (it.done()) {
+ *line = -1;
+ *column = -1;
+ return nullptr;
+ }
JavaScriptFrame* frame = it.frame();
DCHECK(!frame->is_builtin());
int position = frame->position();
@@ -764,10 +772,10 @@ class CaptureStackTraceHelper {
const FrameSummary::WasmFrameSummary& summ) {
Handle<StackFrameInfo> info = factory()->NewStackFrameInfo();
- Handle<WasmCompiledModule> compiled_module(
- summ.wasm_instance()->compiled_module(), isolate_);
- Handle<String> name = WasmCompiledModule::GetFunctionName(
- isolate_, compiled_module, summ.function_index());
+ Handle<WasmSharedModuleData> shared(
+ summ.wasm_instance()->compiled_module()->shared(), isolate_);
+ Handle<String> name = WasmSharedModuleData::GetFunctionName(
+ isolate_, shared, summ.function_index());
info->set_function_name(*name);
// Encode the function index as line number (1-based).
info->set_line_number(summ.function_index() + 1);
@@ -1029,7 +1037,7 @@ void Isolate::RequestInterrupt(InterruptCallback callback, void* data) {
void Isolate::InvokeApiInterruptCallbacks() {
RuntimeCallTimerScope runtimeTimer(
- this, &RuntimeCallStats::InvokeApiInterruptCallbacks);
+ this, RuntimeCallCounterId::kInvokeApiInterruptCallbacks);
// Note: callback below should be called outside of execution access lock.
while (true) {
InterruptEntry entry;
@@ -1133,7 +1141,7 @@ Object* Isolate::Throw(Object* exception, MessageLocation* location) {
#ifdef DEBUG
if (AllowHeapAllocation::IsAllowed()) {
#else
- if (false) {
+ if ((false)) {
#endif
printf(", %d:%d - %d:%d\n",
Script::GetLineNumber(script, location->start_pos()) + 1,
@@ -1301,7 +1309,7 @@ Object* Isolate::UnwindAndFindHandler() {
set_wasm_caught_exception(exception);
if (FLAG_wasm_jit_to_native) {
wasm::WasmCode* wasm_code =
- wasm_code_manager()->LookupCode(frame->pc());
+ wasm_engine()->code_manager()->LookupCode(frame->pc());
return FoundHandler(nullptr, wasm_code->instructions().start(),
offset, wasm_code->constant_pool(), return_sp,
frame->fp());
@@ -1683,8 +1691,7 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
for (int i = 0; i < frame_count; i++) {
if (elements->IsWasmFrame(i) || elements->IsAsmJsWasmFrame(i)) {
Handle<WasmCompiledModule> compiled_module(
- WasmInstanceObject::cast(elements->WasmInstance(i))
- ->compiled_module());
+ elements->WasmInstance(i)->compiled_module());
uint32_t func_index =
static_cast<uint32_t>(elements->WasmFunctionIndex(i)->value());
int code_offset = elements->Offset(i)->value();
@@ -1701,9 +1708,10 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
bool is_at_number_conversion =
elements->IsAsmJsWasmFrame(i) &&
elements->Flags(i)->value() & FrameArray::kAsmJsAtNumberConversion;
- int pos = WasmCompiledModule::GetSourcePosition(
- compiled_module, func_index, byte_offset, is_at_number_conversion);
- Handle<Script> script(compiled_module->script());
+ int pos = WasmSharedModuleData::GetSourcePosition(
+ handle(compiled_module->shared(), this), func_index, byte_offset,
+ is_at_number_conversion);
+ Handle<Script> script(compiled_module->shared()->script());
*target = MessageLocation(script, pos, pos + 1);
return true;
@@ -1810,21 +1818,9 @@ bool Isolate::IsExternalHandlerOnTop(Object* exception) {
return (entry_handler > external_handler);
}
-
-void Isolate::ReportPendingMessages() {
- DCHECK(AllowExceptions::IsAllowed(this));
-
- // The embedder might run script in response to an exception.
- AllowJavascriptExecutionDebugOnly allow_script(this);
-
+void Isolate::ReportPendingMessagesImpl(bool report_externally) {
Object* exception = pending_exception();
- // Try to propagate the exception to an external v8::TryCatch handler. If
- // propagation was unsuccessful, then we will get another chance at reporting
- // the pending message if the exception is re-thrown.
- bool has_been_propagated = PropagatePendingExceptionToExternalTryCatch();
- if (!has_been_propagated) return;
-
// Clear the pending message object early to avoid endless recursion.
Object* message_obj = thread_local_top_.pending_message_obj_;
clear_pending_message();
@@ -1837,7 +1833,7 @@ void Isolate::ReportPendingMessages() {
// depending on whether and external v8::TryCatch or an internal JavaScript
// handler is on top.
bool should_report_exception;
- if (IsExternalHandlerOnTop(exception)) {
+ if (report_externally) {
// Only report the exception if the external handler is verbose.
should_report_exception = try_catch_handler()->is_verbose_;
} else {
@@ -1858,6 +1854,85 @@ void Isolate::ReportPendingMessages() {
}
}
+void Isolate::ReportPendingMessages() {
+ DCHECK(AllowExceptions::IsAllowed(this));
+
+ // The embedder might run script in response to an exception.
+ AllowJavascriptExecutionDebugOnly allow_script(this);
+
+ Object* exception = pending_exception();
+
+ // Try to propagate the exception to an external v8::TryCatch handler. If
+ // propagation was unsuccessful, then we will get another chance at reporting
+ // the pending message if the exception is re-thrown.
+ bool has_been_propagated = PropagatePendingExceptionToExternalTryCatch();
+ if (!has_been_propagated) return;
+
+ ReportPendingMessagesImpl(IsExternalHandlerOnTop(exception));
+}
+
+void Isolate::ReportPendingMessagesFromJavaScript() {
+ DCHECK(AllowExceptions::IsAllowed(this));
+
+ auto IsHandledByJavaScript = [=]() {
+ // In this situation, the exception is always a non-terminating exception.
+
+ // Get the top-most JS_ENTRY handler, cannot be on top if it doesn't exist.
+ Address entry_handler = Isolate::handler(thread_local_top());
+ DCHECK_NOT_NULL(entry_handler);
+ entry_handler =
+ reinterpret_cast<StackHandler*>(entry_handler)->next()->address();
+
+ // Get the address of the external handler so we can compare the address to
+ // determine which one is closer to the top of the stack.
+ Address external_handler = thread_local_top()->try_catch_handler_address();
+ if (external_handler == nullptr) return true;
+
+ return (entry_handler < external_handler);
+ };
+
+ auto IsHandledExternally = [=]() {
+ Address external_handler = thread_local_top()->try_catch_handler_address();
+ if (external_handler == nullptr) return false;
+
+ // Get the top-most JS_ENTRY handler, cannot be on top if it doesn't exist.
+ Address entry_handler = Isolate::handler(thread_local_top());
+ DCHECK_NOT_NULL(entry_handler);
+ entry_handler =
+ reinterpret_cast<StackHandler*>(entry_handler)->next()->address();
+ return (entry_handler > external_handler);
+ };
+
+ auto PropagateToExternalHandler = [=]() {
+ if (IsHandledByJavaScript()) {
+ thread_local_top_.external_caught_exception_ = false;
+ return false;
+ }
+
+ if (!IsHandledExternally()) {
+ thread_local_top_.external_caught_exception_ = false;
+ return true;
+ }
+
+ thread_local_top_.external_caught_exception_ = true;
+ v8::TryCatch* handler = try_catch_handler();
+ DCHECK(thread_local_top_.pending_message_obj_->IsJSMessageObject() ||
+ thread_local_top_.pending_message_obj_->IsTheHole(this));
+ handler->can_continue_ = true;
+ handler->has_terminated_ = false;
+ handler->exception_ = pending_exception();
+ // Propagate to the external try-catch only if we got an actual message.
+ if (thread_local_top_.pending_message_obj_->IsTheHole(this)) return true;
+
+ handler->message_obj_ = thread_local_top_.pending_message_obj_;
+ return true;
+ };
+
+ // Try to propagate to an external v8::TryCatch handler.
+ if (!PropagateToExternalHandler()) return;
+
+ ReportPendingMessagesImpl(true);
+}
MessageLocation Isolate::GetMessageLocation() {
DCHECK(has_pending_exception());
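
The new ReportPendingMessagesFromJavaScript decides whether the exception will be handled by the top-most JS_ENTRY handler or by an external v8::TryCatch by comparing the two handler addresses (on a downward-growing stack, the smaller address is closer to the top). A condensed sketch of that comparison, with the addresses modeled as plain integers instead of StackHandler walks:

#include <cstdint>
#include <cstdio>

// On a downward-growing stack a smaller address is closer to the top, so the
// handler with the smaller address is the one that sees the exception first.
bool HandledByJavaScript(uintptr_t entry_handler, uintptr_t external_handler) {
  if (external_handler == 0) return true;   // no v8::TryCatch installed at all
  return entry_handler < external_handler;  // JS_ENTRY handler is on top
}

int main() {
  std::printf("%d\n", HandledByJavaScript(0x1000, 0x2000));  // 1: JS handler wins
  std::printf("%d\n", HandledByJavaScript(0x3000, 0x2000));  // 0: external TryCatch wins
  std::printf("%d\n", HandledByJavaScript(0x1000, 0));       // 1: nothing external to report to
  return 0;
}
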
@@ -2041,8 +2116,23 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
}
Handle<Object> retval = undefined;
PromiseOnStack* promise_on_stack = tltop->promise_on_stack_;
- for (JavaScriptFrameIterator it(this); !it.done(); it.Advance()) {
- switch (PredictException(it.frame())) {
+ for (StackFrameIterator it(this); !it.done(); it.Advance()) {
+ StackFrame* frame = it.frame();
+ HandlerTable::CatchPrediction catch_prediction;
+ if (frame->is_java_script()) {
+ catch_prediction = PredictException(JavaScriptFrame::cast(frame));
+ } else if (frame->type() == StackFrame::STUB) {
+ Code* code = frame->LookupCode();
+ if (!code->IsCode() || code->kind() != Code::BUILTIN ||
+ !code->handler_table()->length() || !code->is_turbofanned()) {
+ continue;
+ }
+ catch_prediction = code->GetBuiltinCatchPrediction();
+ } else {
+ continue;
+ }
+
+ switch (catch_prediction) {
case HandlerTable::UNCAUGHT:
continue;
case HandlerTable::CAUGHT:
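
GetPromiseOnStackOnThrow now walks every stack frame instead of only JavaScript frames: turbofanned builtin STUB frames with a non-empty handler table also contribute a catch prediction via GetBuiltinCatchPrediction. A small sketch of that frame filter, with the frame and prediction enums reduced to the bare minimum:

#include <cstdio>
#include <vector>

enum class FrameType { kJavaScript, kStub, kOther };
enum class CatchPrediction { kUncaught, kCaught, kPromiseRejection };

struct Frame {
  FrameType type;
  bool turbofanned = false;
  int handler_table_length = 0;
  CatchPrediction prediction = CatchPrediction::kUncaught;
};

// True if this frame should contribute a catch prediction to the search.
bool ContributesPrediction(const Frame& f) {
  if (f.type == FrameType::kJavaScript) return true;
  if (f.type == FrameType::kStub)
    return f.turbofanned && f.handler_table_length > 0;
  return false;  // every other frame type is skipped
}

int main() {
  std::vector<Frame> stack = {
      {FrameType::kOther},
      {FrameType::kStub, /*turbofanned=*/true, /*handler_table_length=*/4,
       CatchPrediction::kPromiseRejection},
      {FrameType::kJavaScript},
  };
  for (const Frame& f : stack)
    std::printf("considered=%d\n", ContributesPrediction(f));
  return 0;
}
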
@@ -2392,9 +2482,9 @@ Isolate::Isolate(bool enable_serializer)
descriptor_lookup_cache_(nullptr),
handle_scope_implementer_(nullptr),
unicode_cache_(nullptr),
- allocator_(FLAG_trace_gc_object_stats ? new VerboseAccountingAllocator(
- &heap_, 256 * KB, 128 * KB)
- : new AccountingAllocator()),
+ allocator_(FLAG_trace_zone_stats ? new VerboseAccountingAllocator(
+ &heap_, 256 * KB, 128 * KB)
+ : new AccountingAllocator()),
inner_pointer_to_code_cache_(nullptr),
global_handles_(nullptr),
eternal_handles_(nullptr),
@@ -2432,7 +2522,6 @@ Isolate::Isolate(bool enable_serializer)
use_counter_callback_(nullptr),
basic_block_profiler_(nullptr),
cancelable_task_manager_(new CancelableTaskManager()),
- wasm_compilation_manager_(new wasm::CompilationManager()),
abort_on_uncaught_exception_callback_(nullptr),
total_regexp_code_generated_(0) {
{
@@ -2454,9 +2543,6 @@ Isolate::Isolate(bool enable_serializer)
thread_manager_->isolate_ = this;
#ifdef DEBUG
- // heap_histograms_ initializes itself.
- memset(&js_spill_information_, 0, sizeof(js_spill_information_));
-
non_disposed_isolates_.Increment(1);
#endif // DEBUG
@@ -2482,6 +2568,10 @@ Isolate::Isolate(bool enable_serializer)
void Isolate::TearDown() {
TRACE_ISOLATE(tear_down);
+ if (FLAG_stress_sampling_allocation_profiler > 0) {
+ heap_profiler()->StopSamplingHeapProfiler();
+ }
+
// Temporarily set this isolate as current so that various parts of
// the isolate can access it in their destructors without having a
// direct pointer. We don't use Enter/Exit here to avoid
@@ -2535,7 +2625,7 @@ void Isolate::Deinit() {
optimizing_compile_dispatcher_ = nullptr;
}
- wasm_compilation_manager_->TearDown();
+ wasm_engine()->compilation_manager()->TearDown();
heap_.mark_compact_collector()->EnsureSweepingCompleted();
heap_.memory_allocator()->unmapper()->WaitUntilCompleted();
@@ -2686,9 +2776,8 @@ Isolate::~Isolate() {
allocator_ = nullptr;
#if USE_SIMULATOR
- Simulator::TearDown(simulator_i_cache_, simulator_redirection_);
+ Simulator::TearDown(simulator_i_cache_);
simulator_i_cache_ = nullptr;
- simulator_redirection_ = nullptr;
#endif
}
@@ -2816,11 +2905,8 @@ bool Isolate::Init(StartupDeserializer* des) {
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390
Simulator::Initialize(this);
#endif
-#endif
{ // NOLINT
// Ensure that the thread has a valid stack guard. The v8::Locker object
@@ -2837,16 +2923,15 @@ bool Isolate::Init(StartupDeserializer* des) {
return false;
}
- // Setup the wasm code manager. Currently, there's one per Isolate.
- if (!wasm_code_manager_) {
- size_t max_code_size = kMaxWasmCodeMemory;
- if (kRequiresCodeRange) {
- max_code_size = std::min(max_code_size,
- heap_.memory_allocator()->code_range()->size());
- }
- wasm_code_manager_.reset(new wasm::WasmCodeManager(
- reinterpret_cast<v8::Isolate*>(this), max_code_size));
- }
+ // Setup the wasm engine. Currently, there's one per Isolate.
+ const size_t max_code_size =
+ kRequiresCodeRange
+ ? std::min(kMaxWasmCodeMemory,
+ heap_.memory_allocator()->code_range()->size())
+ : kMaxWasmCodeMemory;
+ wasm_engine_.reset(new wasm::WasmEngine(
+ std::unique_ptr<wasm::WasmCodeManager>(new wasm::WasmCodeManager(
+ reinterpret_cast<v8::Isolate*>(this), max_code_size))));
// Initialize the interface descriptors ahead of time.
#define INTERFACE_DESCRIPTOR(Name, ...) \
@@ -2953,6 +3038,15 @@ bool Isolate::Init(StartupDeserializer* des) {
if (!FLAG_inline_new) heap_.DisableInlineAllocation();
+ if (FLAG_stress_sampling_allocation_profiler > 0) {
+ uint64_t sample_interval = FLAG_stress_sampling_allocation_profiler;
+ int stack_depth = 128;
+ v8::HeapProfiler::SamplingFlags sampling_flags =
+ v8::HeapProfiler::SamplingFlags::kSamplingForceGC;
+ heap_profiler()->StartSamplingHeapProfiler(sample_interval, stack_depth,
+ sampling_flags);
+ }
+
return true;
}
@@ -3090,7 +3184,7 @@ bool Isolate::use_optimizer() {
bool Isolate::NeedsSourcePositionsForProfiling() const {
return FLAG_trace_deopt || FLAG_trace_turbo || FLAG_trace_turbo_graph ||
FLAG_turbo_profiling || FLAG_perf_prof || is_profiling() ||
- debug_->is_active() || logger_->is_logging();
+ debug_->is_active() || logger_->is_logging() || FLAG_trace_maps;
}
void Isolate::SetFeedbackVectorsForProfilingTools(Object* value) {
@@ -3098,27 +3192,32 @@ void Isolate::SetFeedbackVectorsForProfilingTools(Object* value) {
heap()->set_feedback_vectors_for_profiling_tools(value);
}
-void Isolate::InitializeVectorListFromHeap() {
+void Isolate::MaybeInitializeVectorListFromHeap() {
+ if (!heap()->feedback_vectors_for_profiling_tools()->IsUndefined(this)) {
+ // Already initialized, return early.
+ DCHECK(heap()->feedback_vectors_for_profiling_tools()->IsArrayList());
+ return;
+ }
+
// Collect existing feedback vectors.
std::vector<Handle<FeedbackVector>> vectors;
+
{
HeapIterator heap_iterator(heap());
while (HeapObject* current_obj = heap_iterator.next()) {
- if (current_obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(current_obj);
- shared->set_has_reported_binary_coverage(false);
- } else if (current_obj->IsFeedbackVector()) {
- FeedbackVector* vector = FeedbackVector::cast(current_obj);
- SharedFunctionInfo* shared = vector->shared_function_info();
- if (!shared->IsSubjectToDebugging()) continue;
- vector->clear_invocation_count();
- vectors.emplace_back(vector, this);
- }
+ if (!current_obj->IsFeedbackVector()) continue;
+
+ FeedbackVector* vector = FeedbackVector::cast(current_obj);
+ SharedFunctionInfo* shared = vector->shared_function_info();
+
+ // No need to preserve the feedback vector for non-user-visible functions.
+ if (!shared->IsSubjectToDebugging()) continue;
+
+ vectors.emplace_back(vector, this);
}
}
- // Add collected feedback vectors to the root list lest we lose them to
- // GC.
+ // Add collected feedback vectors to the root list lest we lose them to GC.
Handle<ArrayList> list =
ArrayList::New(this, static_cast<int>(vectors.size()));
for (const auto& vector : vectors) list = ArrayList::Add(list, vector);
@@ -3356,7 +3455,16 @@ base::RandomNumberGenerator* Isolate::random_number_generator() {
}
base::RandomNumberGenerator* Isolate::fuzzer_rng() {
- return ensure_rng_exists(&fuzzer_rng_, FLAG_fuzzer_random_seed);
+ if (fuzzer_rng_ == nullptr) {
+ int64_t seed = FLAG_fuzzer_random_seed;
+ if (seed == 0) {
+ seed = random_number_generator()->initial_seed();
+ }
+
+ fuzzer_rng_ = new base::RandomNumberGenerator(seed);
+ }
+
+ return fuzzer_rng_;
}
int Isolate::GenerateIdentityHash(uint32_t mask) {
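
fuzzer_rng() is now initialized inline: it uses FLAG_fuzzer_random_seed when it is non-zero and otherwise falls back to the initial seed of the main random_number_generator(). A tiny sketch of that lazy fallback, with std::mt19937_64 standing in for base::RandomNumberGenerator:

#include <cstdint>
#include <memory>
#include <random>

// std::mt19937_64 stands in for base::RandomNumberGenerator.
struct RngHolder {
  int64_t flag_seed = 0;             // e.g. --fuzzer-random-seed (0 means "unset")
  int64_t initial_seed = 123456789;  // seed the main RNG was created with

  std::mt19937_64& fuzzer_rng() {
    if (!rng_) {
      // Prefer the explicit flag; otherwise reuse the main RNG's initial seed.
      int64_t seed = flag_seed != 0 ? flag_seed : initial_seed;
      rng_ = std::make_unique<std::mt19937_64>(static_cast<uint64_t>(seed));
    }
    return *rng_;
  }

 private:
  std::unique_ptr<std::mt19937_64> rng_;
};

int main() {
  RngHolder holder;
  std::uint64_t value = holder.fuzzer_rng()();  // constructed lazily on first use
  (void)value;
  return 0;
}
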
@@ -3688,72 +3796,89 @@ void Isolate::RunMicrotasksInternal() {
if (!pending_microtask_count()) return;
TRACE_EVENT0("v8.execute", "RunMicrotasks");
TRACE_EVENT_CALL_STATS_SCOPED(this, "v8", "V8.RunMicrotasks");
- while (pending_microtask_count() > 0) {
- HandleScope scope(this);
- int num_tasks = pending_microtask_count();
- // Do not use factory()->microtask_queue() here; we need a fresh handle!
- Handle<FixedArray> queue(heap()->microtask_queue(), this);
- DCHECK(num_tasks <= queue->length());
- set_pending_microtask_count(0);
- heap()->set_microtask_queue(heap()->empty_fixed_array());
- Isolate* isolate = this;
- FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < num_tasks, i++, {
- Handle<Object> microtask(queue->get(i), this);
-
- if (microtask->IsCallHandlerInfo()) {
- Handle<CallHandlerInfo> callback_info =
- Handle<CallHandlerInfo>::cast(microtask);
- v8::MicrotaskCallback callback =
- v8::ToCData<v8::MicrotaskCallback>(callback_info->callback());
- void* data = v8::ToCData<void*>(callback_info->data());
- callback(data);
- } else {
- SaveContext save(this);
- Context* context;
- if (microtask->IsJSFunction()) {
- context = Handle<JSFunction>::cast(microtask)->context();
- } else if (microtask->IsPromiseResolveThenableJobInfo()) {
- context =
- Handle<PromiseResolveThenableJobInfo>::cast(microtask)->context();
- } else {
- context = Handle<PromiseReactionJobInfo>::cast(microtask)->context();
- }
+ do {
+ HandleScope handle_scope(this);
+ set_microtask_queue_bailout_index(-1);
+ set_microtask_queue_bailout_count(-1);
+ MaybeHandle<Object> maybe_exception;
+ MaybeHandle<Object> maybe_result = Execution::RunMicrotasks(
+ this, Execution::MessageHandling::kReport, &maybe_exception);
+ if (maybe_result.is_null() && maybe_exception.is_null()) {
+ heap()->set_microtask_queue(heap()->empty_fixed_array());
+ set_pending_microtask_count(0);
+ return;
+ }
- set_context(context->native_context());
- handle_scope_implementer_->EnterMicrotaskContext(
- Handle<Context>(context, this));
-
- MaybeHandle<Object> result;
- MaybeHandle<Object> maybe_exception;
-
- if (microtask->IsJSFunction()) {
- Handle<JSFunction> microtask_function =
- Handle<JSFunction>::cast(microtask);
- result = Execution::TryCall(
- this, microtask_function, factory()->undefined_value(), 0,
- nullptr, Execution::MessageHandling::kReport, &maybe_exception);
- } else if (microtask->IsPromiseResolveThenableJobInfo()) {
- PromiseResolveThenableJob(
- Handle<PromiseResolveThenableJobInfo>::cast(microtask), &result,
- &maybe_exception);
- } else {
- PromiseReactionJob(Handle<PromiseReactionJobInfo>::cast(microtask),
- &result, &maybe_exception);
- }
+ Handle<Object> result = maybe_result.ToHandleChecked();
+ if (result->IsUndefined(this)) return;
- handle_scope_implementer_->LeaveMicrotaskContext();
+ Handle<FixedArray> queue = Handle<FixedArray>::cast(result);
+ int num_tasks = microtask_queue_bailout_count();
+ DCHECK_GE(microtask_queue_bailout_index(), 0);
- // If execution is terminating, just bail out.
- if (result.is_null() && maybe_exception.is_null()) {
- // Clear out any remaining callbacks in the queue.
- heap()->set_microtask_queue(heap()->empty_fixed_array());
- set_pending_microtask_count(0);
- return;
- }
- }
- });
- }
+ Isolate* isolate = this;
+ FOR_WITH_HANDLE_SCOPE(
+ isolate, int, i = microtask_queue_bailout_index(), i, i < num_tasks,
+ i++, {
+ Handle<Object> microtask(queue->get(i), this);
+
+ if (microtask->IsCallHandlerInfo()) {
+ Handle<CallHandlerInfo> callback_info =
+ Handle<CallHandlerInfo>::cast(microtask);
+ v8::MicrotaskCallback callback =
+ v8::ToCData<v8::MicrotaskCallback>(callback_info->callback());
+ void* data = v8::ToCData<void*>(callback_info->data());
+ callback(data);
+ } else {
+ SaveContext save(this);
+ Context* context;
+ if (microtask->IsJSFunction()) {
+ context = Handle<JSFunction>::cast(microtask)->context();
+ } else if (microtask->IsPromiseResolveThenableJobInfo()) {
+ context = Handle<PromiseResolveThenableJobInfo>::cast(microtask)
+ ->context();
+ } else {
+ context =
+ Handle<PromiseReactionJobInfo>::cast(microtask)->context();
+ }
+
+ set_context(context->native_context());
+ handle_scope_implementer_->EnterMicrotaskContext(
+ Handle<Context>(context, this));
+
+ MaybeHandle<Object> result;
+ MaybeHandle<Object> maybe_exception;
+
+ if (microtask->IsJSFunction()) {
+ Handle<JSFunction> microtask_function =
+ Handle<JSFunction>::cast(microtask);
+ result = Execution::TryCall(
+ this, microtask_function, factory()->undefined_value(), 0,
+ nullptr, Execution::MessageHandling::kReport,
+ &maybe_exception);
+ } else if (microtask->IsPromiseResolveThenableJobInfo()) {
+ PromiseResolveThenableJob(
+ Handle<PromiseResolveThenableJobInfo>::cast(microtask),
+ &result, &maybe_exception);
+ } else {
+ PromiseReactionJob(
+ Handle<PromiseReactionJobInfo>::cast(microtask), &result,
+ &maybe_exception);
+ }
+
+ handle_scope_implementer_->LeaveMicrotaskContext();
+
+ // If execution is terminating, just bail out.
+ if (result.is_null() && maybe_exception.is_null()) {
+ // Clear out any remaining callbacks in the queue.
+ heap()->set_microtask_queue(heap()->empty_fixed_array());
+ set_pending_microtask_count(0);
+ return;
+ }
+ }
+ });
+ } while (pending_microtask_count() > 0);
}
void Isolate::SetUseCounterCallback(v8::Isolate::UseCounterCallback callback) {
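
RunMicrotasksInternal now lets Execution::RunMicrotasks drain the queue first and only falls back to the per-task C++ loop from microtask_queue_bailout_index() onward, repeating until the pending count reaches zero. A schematic of that two-tier drain loop, with plain callables in place of heap-allocated microtasks and a purely hypothetical bailout condition:

#include <cstdio>
#include <functional>
#include <vector>

using Microtask = std::function<void()>;

// Stand-in for the generated fast path: runs tasks until it hits one it cannot
// handle and returns the index where the C++ loop should take over (-1 if done).
int RunMicrotasksFast(const std::vector<Microtask>& queue, int bailout_at) {
  for (int i = 0; i < static_cast<int>(queue.size()); ++i) {
    if (i == bailout_at) return i;  // hypothetical bailout condition
    queue[i]();
  }
  return -1;
}

void RunMicrotasks(std::vector<Microtask>* queue, int bailout_at) {
  do {
    int bailout_index = RunMicrotasksFast(*queue, bailout_at);
    if (bailout_index < 0) {
      queue->clear();  // fast path drained everything
      return;
    }
    // Slow path: run the remaining tasks one by one, then re-check the queue.
    for (int i = bailout_index; i < static_cast<int>(queue->size()); ++i) {
      (*queue)[i]();
    }
    queue->clear();
  } while (!queue->empty());
}

int main() {
  std::vector<Microtask> queue = {
      [] { std::printf("task 0\n"); },
      [] { std::printf("task 1\n"); },
      [] { std::printf("task 2\n"); },
  };
  RunMicrotasks(&queue, /*bailout_at=*/1);
  return 0;
}
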
@@ -3884,10 +4009,6 @@ void Isolate::PrintWithTimestamp(const char* format, ...) {
va_end(arguments);
}
-wasm::WasmCodeManager* Isolate::wasm_code_manager() {
- return wasm_code_manager_.get();
-}
-
bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
StackGuard* stack_guard = isolate_->stack_guard();
#ifdef USE_SIMULATOR
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 9e3de53675..8eca55ffd6 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -109,8 +109,7 @@ class Interpreter;
}
namespace wasm {
-class CompilationManager;
-class WasmCodeManager;
+class WasmEngine;
}
#define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \
@@ -331,7 +330,7 @@ class ThreadLocalTop BASE_EMBEDDED {
Object* pending_exception_;
// TODO(kschimpf): Change this to a stack of caught exceptions (rather than
// just innermost catching try block).
- Object* wasm_caught_exception_;
+ Object* wasm_caught_exception_ = nullptr;
// Communication channel between Isolate::FindHandler and the CEntryStub.
Context* pending_handler_context_;
@@ -373,6 +372,9 @@ class ThreadLocalTop BASE_EMBEDDED {
// Call back function to report unsafe JS accesses.
v8::FailedAccessCheckCallback failed_access_check_callback_;
+ int microtask_queue_bailout_index_;
+ int microtask_queue_bailout_count_;
+
private:
void InitializeInternal();
@@ -382,10 +384,8 @@ class ThreadLocalTop BASE_EMBEDDED {
#if USE_SIMULATOR
-#define ISOLATE_INIT_SIMULATOR_LIST(V) \
- V(bool, simulator_initialized, false) \
- V(base::CustomMatcherHashMap*, simulator_i_cache, nullptr) \
- V(Redirection*, simulator_redirection, nullptr)
+#define ISOLATE_INIT_SIMULATOR_LIST(V) \
+ V(base::CustomMatcherHashMap*, simulator_i_cache, nullptr)
#else
#define ISOLATE_INIT_SIMULATOR_LIST(V)
@@ -675,6 +675,18 @@ class Isolate {
return &thread_local_top_.js_entry_sp_;
}
+ THREAD_LOCAL_TOP_ACCESSOR(int, microtask_queue_bailout_index)
+ Address microtask_queue_bailout_index_address() {
+ return reinterpret_cast<Address>(
+ &thread_local_top_.microtask_queue_bailout_index_);
+ }
+
+ THREAD_LOCAL_TOP_ACCESSOR(int, microtask_queue_bailout_count)
+ Address microtask_queue_bailout_count_address() {
+ return reinterpret_cast<Address>(
+ &thread_local_top_.microtask_queue_bailout_count_);
+ }
+
// Returns the global object of the current context. It could be
// a builtin object, or a JS global object.
inline Handle<JSGlobalObject> global_object();
@@ -808,6 +820,11 @@ class Isolate {
// Un-schedule an exception that was caught by a TryCatch handler.
void CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler);
void ReportPendingMessages();
+ void ReportPendingMessagesFromJavaScript();
+
+ // Implements code shared between the two above methods
+ void ReportPendingMessagesImpl(bool report_externally);
+
// Return pending location if any or unfilled structure.
MessageLocation GetMessageLocation();
@@ -906,7 +923,7 @@ class Isolate {
}
StackGuard* stack_guard() { return &stack_guard_; }
Heap* heap() { return &heap_; }
- V8_EXPORT_PRIVATE wasm::WasmCodeManager* wasm_code_manager();
+ wasm::WasmEngine* wasm_engine() const { return wasm_engine_.get(); }
StubCache* load_stub_cache() { return load_stub_cache_; }
StubCache* store_stub_cache() { return store_stub_cache_; }
DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
@@ -991,12 +1008,6 @@ class Isolate {
static size_t non_disposed_isolates() {
return non_disposed_isolates_.Value();
}
-
- HistogramInfo* heap_histograms() { return heap_histograms_; }
-
- JSObject::SpillInformation* js_spill_information() {
- return &js_spill_information_;
- }
#endif
Factory* factory() { return reinterpret_cast<Factory*>(this); }
@@ -1065,7 +1076,7 @@ class Isolate {
// memory usage is expected.
void SetFeedbackVectorsForProfilingTools(Object* value);
- void InitializeVectorListFromHeap();
+ void MaybeInitializeVectorListFromHeap();
double time_millis_since_init() {
return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
@@ -1210,6 +1221,7 @@ class Isolate {
void PromiseResolveThenableJob(Handle<PromiseResolveThenableJobInfo> info,
MaybeHandle<Object>* result,
MaybeHandle<Object>* maybe_exception);
+
void EnqueueMicrotask(Handle<Object> microtask);
void RunMicrotasks();
bool IsRunningMicrotasks() const { return is_running_microtasks_; }
@@ -1233,6 +1245,14 @@ class Isolate {
return reinterpret_cast<Address>(&promise_hook_or_debug_is_active_);
}
+ Address pending_microtask_count_address() {
+ return reinterpret_cast<Address>(&pending_microtask_count_);
+ }
+
+ Address handle_scope_implementer_address() {
+ return reinterpret_cast<Address>(&handle_scope_implementer_);
+ }
+
void DebugStateUpdated();
void SetPromiseHook(PromiseHook hook);
@@ -1259,10 +1279,6 @@ class Isolate {
return cancelable_task_manager_;
}
- wasm::CompilationManager* wasm_compilation_manager() {
- return wasm_compilation_manager_.get();
- }
-
const AstStringConstants* ast_string_constants() const {
return ast_string_constants_;
}
@@ -1303,9 +1319,6 @@ class Isolate {
#ifdef USE_SIMULATOR
base::Mutex* simulator_i_cache_mutex() { return &simulator_i_cache_mutex_; }
- base::Mutex* simulator_redirection_mutex() {
- return &simulator_redirection_mutex_;
- }
#endif
void set_allow_atomics_wait(bool set) { allow_atomics_wait_ = set; }
@@ -1558,8 +1571,6 @@ class Isolate {
#ifdef DEBUG
static base::AtomicNumber<size_t> non_disposed_isolates_;
- // A static array of histogram info for each type.
- HistogramInfo heap_histograms_[LAST_TYPE + 1];
JSObject::SpillInformation js_spill_information_;
#endif
@@ -1634,8 +1645,6 @@ class Isolate {
CancelableTaskManager* cancelable_task_manager_;
- std::unique_ptr<wasm::CompilationManager> wasm_compilation_manager_;
-
debug::ConsoleDelegate* console_delegate_ = nullptr;
v8::Isolate::AbortOnUncaughtExceptionCallback
@@ -1643,7 +1652,6 @@ class Isolate {
#ifdef USE_SIMULATOR
base::Mutex simulator_i_cache_mutex_;
- base::Mutex simulator_redirection_mutex_;
#endif
bool allow_atomics_wait_;
@@ -1654,7 +1662,7 @@ class Isolate {
size_t elements_deletion_counter_ = 0;
- std::unique_ptr<wasm::WasmCodeManager> wasm_code_manager_;
+ std::unique_ptr<wasm::WasmEngine> wasm_engine_;
// The top entry of the v8::Context::BackupIncumbentScope stack.
const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope_ =
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
index 80fd250d22..7605fc1a7d 100644
--- a/deps/v8/src/js/array.js
+++ b/deps/v8/src/js/array.js
@@ -1118,64 +1118,6 @@ DEFINE_METHOD_LEN(
);
-function InnerArrayFind(predicate, thisArg, array, length) {
- if (!IS_CALLABLE(predicate)) {
- throw %make_type_error(kCalledNonCallable, predicate);
- }
-
- for (var i = 0; i < length; i++) {
- var element = array[i];
- if (%_Call(predicate, thisArg, element, i, array)) {
- return element;
- }
- }
-
- return;
-}
-
-
-// ES6 draft 07-15-13, section 15.4.3.23
-DEFINE_METHOD_LEN(
- GlobalArray.prototype,
- find(predicate, thisArg) {
- var array = TO_OBJECT(this);
- var length = TO_INTEGER(array.length);
-
- return InnerArrayFind(predicate, thisArg, array, length);
- },
- 1 /* Set function length */
-);
-
-
-function InnerArrayFindIndex(predicate, thisArg, array, length) {
- if (!IS_CALLABLE(predicate)) {
- throw %make_type_error(kCalledNonCallable, predicate);
- }
-
- for (var i = 0; i < length; i++) {
- var element = array[i];
- if (%_Call(predicate, thisArg, element, i, array)) {
- return i;
- }
- }
-
- return -1;
-}
-
-
-// ES6 draft 07-15-13, section 15.4.3.24
-DEFINE_METHOD_LEN(
- GlobalArray.prototype,
- findIndex(predicate, thisArg) {
- var array = TO_OBJECT(this);
- var length = TO_INTEGER(array.length);
-
- return InnerArrayFindIndex(predicate, thisArg, array, length);
- },
- 1 /* Set function length */
-);
-
-
// ES6, draft 04-05-14, section 22.1.3.6
DEFINE_METHOD_LEN(
GlobalArray.prototype,
diff --git a/deps/v8/src/js/prologue.js b/deps/v8/src/js/prologue.js
index 08ef3ba520..32f826691d 100644
--- a/deps/v8/src/js/prologue.js
+++ b/deps/v8/src/js/prologue.js
@@ -99,6 +99,42 @@ function PostNatives(utils) {
utils.PostNatives = UNDEFINED;
}
+// ----------------------------------------------------------------------------
+// Object
+
+var iteratorSymbol = ImportNow("iterator_symbol");
+
+// ES6 7.3.9
+function GetMethod(obj, p) {
+ var func = obj[p];
+ if (IS_NULL_OR_UNDEFINED(func)) return UNDEFINED;
+ if (IS_CALLABLE(func)) return func;
+ throw %make_type_error(kCalledNonCallable, typeof func);
+}
+
+// ----------------------------------------------------------------------------
+// Iterator related spec functions.
+
+// ES6 7.4.1 GetIterator(obj, method)
+function GetIterator(obj, method) {
+ if (IS_UNDEFINED(method)) {
+ method = obj[iteratorSymbol];
+ }
+ if (!IS_CALLABLE(method)) {
+ throw %make_type_error(kNotIterable, obj);
+ }
+ var iterator = %_Call(method, obj);
+ if (!IS_RECEIVER(iterator)) {
+ throw %make_type_error(kNotAnIterator, iterator);
+ }
+ return iterator;
+}
+
+
+exports_container.GetIterator = GetIterator;
+exports_container.GetMethod = GetMethod;
+
+
// -----------------------------------------------------------------------
%OptimizeObjectForAddingMultipleProperties(utils, 14);
diff --git a/deps/v8/src/js/proxy.js b/deps/v8/src/js/proxy.js
deleted file mode 100644
index 4b6255a8ff..0000000000
--- a/deps/v8/src/js/proxy.js
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// ----------------------------------------------------------------------------
-// Imports
-//
-var GlobalProxy = global.Proxy;
-
-//----------------------------------------------------------------------------
-
-//Set up non-enumerable properties of the Proxy object.
-DEFINE_METHOD(
- GlobalProxy,
- revocable(target, handler) {
- var p = new GlobalProxy(target, handler);
- return {proxy: p, revoke: () => %JSProxyRevoke(p)};
- }
-);
-
-})
diff --git a/deps/v8/src/js/v8natives.js b/deps/v8/src/js/v8natives.js
deleted file mode 100644
index 26dada3759..0000000000
--- a/deps/v8/src/js/v8natives.js
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// ----------------------------------------------------------------------------
-// Imports
-
-var GlobalObject = global.Object;
-var iteratorSymbol = utils.ImportNow("iterator_symbol");
-
-// ----------------------------------------------------------------------------
-// Object
-
-// Set up non-enumerable functions on the Object.prototype object.
-DEFINE_METHOD(
- GlobalObject.prototype,
- // ES6 19.1.3.5 Object.prototype.toLocaleString([reserved1 [,reserved2]])
- toLocaleString() {
- REQUIRE_OBJECT_COERCIBLE(this, "Object.prototype.toLocaleString");
- return this.toString();
- }
-);
-
-// ES6 7.3.9
-function GetMethod(obj, p) {
- var func = obj[p];
- if (IS_NULL_OR_UNDEFINED(func)) return UNDEFINED;
- if (IS_CALLABLE(func)) return func;
- throw %make_type_error(kCalledNonCallable, typeof func);
-}
-
-// ----------------------------------------------------------------------------
-// Iterator related spec functions.
-
-// ES6 7.4.1 GetIterator(obj, method)
-function GetIterator(obj, method) {
- if (IS_UNDEFINED(method)) {
- method = obj[iteratorSymbol];
- }
- if (!IS_CALLABLE(method)) {
- throw %make_type_error(kNotIterable, obj);
- }
- var iterator = %_Call(method, obj);
- if (!IS_RECEIVER(iterator)) {
- throw %make_type_error(kNotAnIterator, iterator);
- }
- return iterator;
-}
-
-// ----------------------------------------------------------------------------
-// Exports
-
-utils.Export(function(to) {
- to.GetIterator = GetIterator;
- to.GetMethod = GetMethod;
-});
-
-})
diff --git a/deps/v8/src/json-parser.cc b/deps/v8/src/json-parser.cc
index 13f65705a3..57e7fff8c5 100644
--- a/deps/v8/src/json-parser.cc
+++ b/deps/v8/src/json-parser.cc
@@ -20,6 +20,38 @@
namespace v8 {
namespace internal {
+namespace {
+
+// A vector-like data structure that uses a larger vector for allocation, and
+// provides limited utility access. The original vector must not be used for the
+// duration, and it may even be reallocated. This allows vector storage to be
+// reused for the properties of sibling objects.
+template <typename Container>
+class VectorSegment {
+ public:
+ using value_type = typename Container::value_type;
+
+ explicit VectorSegment(Container* container)
+ : container_(*container), begin_(container->size()) {}
+ ~VectorSegment() { container_.resize(begin_); }
+
+ Vector<const value_type> GetVector() const {
+ return Vector<const value_type>(container_.data() + begin_,
+ container_.size() - begin_);
+ }
+
+ template <typename T>
+ void push_back(T&& value) {
+ container_.push_back(std::forward<T>(value));
+ }
+
+ private:
+ Container& container_;
+ const typename Container::size_type begin_;
+};
+
+} // namespace
+
MaybeHandle<Object> JsonParseInternalizer::Internalize(Isolate* isolate,
Handle<Object> object,
Handle<Object> reviver) {
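
The new VectorSegment helper lets ParseJsonObject append properties into a segment of one shared ZoneVector and truncates it back in its destructor, so sibling (and nested) objects reuse the same backing storage instead of allocating a fresh ZoneVector each time. A standalone sketch of the same idea over std::vector:

#include <cstdio>
#include <string>
#include <utility>
#include <vector>

// Appends into a shared vector and rolls it back to its original size on
// destruction, so the same backing storage is reused by the next object.
template <typename Container>
class VectorSegment {
 public:
  explicit VectorSegment(Container* container)
      : container_(*container), begin_(container->size()) {}
  ~VectorSegment() { container_.resize(begin_); }

  void push_back(typename Container::value_type value) {
    container_.push_back(std::move(value));
  }
  std::size_t size() const { return container_.size() - begin_; }

 private:
  Container& container_;
  const typename Container::size_type begin_;
};

int main() {
  std::vector<std::string> shared;  // stands in for the parser's ZoneVector
  {
    VectorSegment<std::vector<std::string>> outer(&shared);
    outer.push_back("a");
    {
      VectorSegment<std::vector<std::string>> inner(&shared);  // nested object
      inner.push_back("b");
      std::printf("inner holds %zu entries\n", inner.size());  // 1
    }  // inner's entries are dropped here; outer's survive
    std::printf("outer holds %zu entries\n", outer.size());    // 1
  }
  std::printf("shared vector size: %zu\n", shared.size());     // 0: storage reused
  return 0;
}
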
@@ -107,11 +139,11 @@ JsonParser<seq_one_byte>::JsonParser(Isolate* isolate, Handle<String> source)
: source_(source),
source_length_(source->length()),
isolate_(isolate),
- factory_(isolate_->factory()),
zone_(isolate_->allocator(), ZONE_NAME),
object_constructor_(isolate_->native_context()->object_function(),
isolate_),
- position_(-1) {
+ position_(-1),
+ properties_(&zone_) {
source_ = String::Flatten(source_);
pretenure_ = (source_length_ >= kPretenureTreshold) ? TENURED : NOT_TENURED;
@@ -164,6 +196,9 @@ MaybeHandle<Object> JsonParser<seq_one_byte>::ParseJson() {
}
Handle<Script> script(factory->NewScript(source_));
+ if (isolate()->NeedsSourcePositionsForProfiling()) {
+ Script::InitLineEnds(script);
+ }
  // We should send a compile error event because we compile the JSON object in
  // a separate source file.
isolate()->debug()->OnCompileError(script);
@@ -333,7 +368,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
factory()->NewJSObject(object_constructor(), pretenure_);
Handle<Map> map(json_object->map());
int descriptor = 0;
- ZoneVector<Handle<Object>> properties(zone());
+ VectorSegment<ZoneVector<Handle<Object>>> properties(&properties_);
DCHECK_EQ(c0_, '{');
bool transitioning = true;
@@ -424,7 +459,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
DCHECK(!transitioning);
// Commit the intermediate state to the object and stop transitioning.
- CommitStateToJsonObject(json_object, map, &properties);
+ CommitStateToJsonObject(json_object, map, properties.GetVector());
JSObject::DefinePropertyOrElementIgnoreAttributes(json_object, key, value)
.Check();
@@ -432,7 +467,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
// If we transitioned until the very end, transition the map now.
if (transitioning) {
- CommitStateToJsonObject(json_object, map, &properties);
+ CommitStateToJsonObject(json_object, map, properties.GetVector());
} else {
while (MatchSkipWhiteSpace(',')) {
HandleScope local_scope(isolate());
@@ -480,15 +515,14 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
template <bool seq_one_byte>
void JsonParser<seq_one_byte>::CommitStateToJsonObject(
Handle<JSObject> json_object, Handle<Map> map,
- ZoneVector<Handle<Object>>* properties) {
+ Vector<const Handle<Object>> properties) {
JSObject::AllocateStorageForMap(json_object, map);
DCHECK(!json_object->map()->is_dictionary_map());
DisallowHeapAllocation no_gc;
DescriptorArray* descriptors = json_object->map()->instance_descriptors();
- int length = static_cast<int>(properties->size());
- for (int i = 0; i < length; i++) {
- Handle<Object> value = (*properties)[i];
+ for (int i = 0; i < properties.length(); i++) {
+ Handle<Object> value = properties[i];
// Initializing store.
json_object->WriteToField(i, descriptors->GetDetails(i), *value);
}
@@ -697,7 +731,7 @@ Handle<String> JsonParser<seq_one_byte>::SlowScanJsonString(
String::WriteToFlat(*prefix, dest, start, end);
while (c0_ != '"') {
- // Check for control character (0x00-0x1f) or unterminated string (<0).
+ // Check for control character (0x00-0x1F) or unterminated string (<0).
if (c0_ < 0x20) return Handle<String>::null();
if (count >= length) {
// We need to create a longer sequential string for the result.
@@ -728,13 +762,13 @@ Handle<String> JsonParser<seq_one_byte>::SlowScanJsonString(
SeqStringSet(seq_string, count++, '\x08');
break;
case 'f':
- SeqStringSet(seq_string, count++, '\x0c');
+ SeqStringSet(seq_string, count++, '\x0C');
break;
case 'n':
- SeqStringSet(seq_string, count++, '\x0a');
+ SeqStringSet(seq_string, count++, '\x0A');
break;
case 'r':
- SeqStringSet(seq_string, count++, '\x0d');
+ SeqStringSet(seq_string, count++, '\x0D');
break;
case 't':
SeqStringSet(seq_string, count++, '\x09');
@@ -862,7 +896,7 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
int beg_pos = position_;
// Fast case for Latin1 only without escape characters.
do {
- // Check for control character (0x00-0x1f) or unterminated string (<0).
+ // Check for control character (0x00-0x1F) or unterminated string (<0).
if (c0_ < 0x20) return Handle<String>::null();
if (c0_ != '\\') {
if (seq_one_byte || c0_ <= String::kMaxOneByteCharCode) {
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index cab094591f..6566c92e40 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -135,7 +135,7 @@ class JsonParser BASE_EMBEDDED {
}
inline Isolate* isolate() { return isolate_; }
- inline Factory* factory() { return factory_; }
+ inline Factory* factory() { return isolate_->factory(); }
inline Handle<JSFunction> object_constructor() { return object_constructor_; }
static const int kInitialSpecialStringLength = 32;
@@ -145,7 +145,7 @@ class JsonParser BASE_EMBEDDED {
Zone* zone() { return &zone_; }
void CommitStateToJsonObject(Handle<JSObject> json_object, Handle<Map> map,
- ZoneVector<Handle<Object>>* properties);
+ Vector<const Handle<Object>> properties);
Handle<String> source_;
int source_length_;
@@ -153,11 +153,13 @@ class JsonParser BASE_EMBEDDED {
PretenureFlag pretenure_;
Isolate* isolate_;
- Factory* factory_;
Zone zone_;
Handle<JSFunction> object_constructor_;
uc32 c0_;
int position_;
+
+ // Property handles are stored here inside ParseJsonObject.
+ ZoneVector<Handle<Object>> properties_;
};
} // namespace internal
diff --git a/deps/v8/src/json-stringifier.cc b/deps/v8/src/json-stringifier.cc
index c2b53a85bd..d77a761b13 100644
--- a/deps/v8/src/json-stringifier.cc
+++ b/deps/v8/src/json-stringifier.cc
@@ -47,39 +47,39 @@ const char* const JsonStringifier::JsonEscapeTable =
"p\0 q\0 r\0 s\0 "
"t\0 u\0 v\0 w\0 "
"x\0 y\0 z\0 {\0 "
- "|\0 }\0 ~\0 \177\0 "
- "\200\0 \201\0 \202\0 \203\0 "
- "\204\0 \205\0 \206\0 \207\0 "
- "\210\0 \211\0 \212\0 \213\0 "
- "\214\0 \215\0 \216\0 \217\0 "
- "\220\0 \221\0 \222\0 \223\0 "
- "\224\0 \225\0 \226\0 \227\0 "
- "\230\0 \231\0 \232\0 \233\0 "
- "\234\0 \235\0 \236\0 \237\0 "
- "\240\0 \241\0 \242\0 \243\0 "
- "\244\0 \245\0 \246\0 \247\0 "
- "\250\0 \251\0 \252\0 \253\0 "
- "\254\0 \255\0 \256\0 \257\0 "
- "\260\0 \261\0 \262\0 \263\0 "
- "\264\0 \265\0 \266\0 \267\0 "
- "\270\0 \271\0 \272\0 \273\0 "
- "\274\0 \275\0 \276\0 \277\0 "
- "\300\0 \301\0 \302\0 \303\0 "
- "\304\0 \305\0 \306\0 \307\0 "
- "\310\0 \311\0 \312\0 \313\0 "
- "\314\0 \315\0 \316\0 \317\0 "
- "\320\0 \321\0 \322\0 \323\0 "
- "\324\0 \325\0 \326\0 \327\0 "
- "\330\0 \331\0 \332\0 \333\0 "
- "\334\0 \335\0 \336\0 \337\0 "
- "\340\0 \341\0 \342\0 \343\0 "
- "\344\0 \345\0 \346\0 \347\0 "
- "\350\0 \351\0 \352\0 \353\0 "
- "\354\0 \355\0 \356\0 \357\0 "
- "\360\0 \361\0 \362\0 \363\0 "
- "\364\0 \365\0 \366\0 \367\0 "
- "\370\0 \371\0 \372\0 \373\0 "
- "\374\0 \375\0 \376\0 \377\0 ";
+ "|\0 }\0 ~\0 \x7F\0 "
+ "\x80\0 \x81\0 \x82\0 \x83\0 "
+ "\x84\0 \x85\0 \x86\0 \x87\0 "
+ "\x88\0 \x89\0 \x8A\0 \x8B\0 "
+ "\x8C\0 \x8D\0 \x8E\0 \x8F\0 "
+ "\x90\0 \x91\0 \x92\0 \x93\0 "
+ "\x94\0 \x95\0 \x96\0 \x97\0 "
+ "\x98\0 \x99\0 \x9A\0 \x9B\0 "
+ "\x9C\0 \x9D\0 \x9E\0 \x9F\0 "
+ "\xA0\0 \xA1\0 \xA2\0 \xA3\0 "
+ "\xA4\0 \xA5\0 \xA6\0 \xA7\0 "
+ "\xA8\0 \xA9\0 \xAA\0 \xAB\0 "
+ "\xAC\0 \xAD\0 \xAE\0 \xAF\0 "
+ "\xB0\0 \xB1\0 \xB2\0 \xB3\0 "
+ "\xB4\0 \xB5\0 \xB6\0 \xB7\0 "
+ "\xB8\0 \xB9\0 \xBA\0 \xBB\0 "
+ "\xBC\0 \xBD\0 \xBE\0 \xBF\0 "
+ "\xC0\0 \xC1\0 \xC2\0 \xC3\0 "
+ "\xC4\0 \xC5\0 \xC6\0 \xC7\0 "
+ "\xC8\0 \xC9\0 \xCA\0 \xCB\0 "
+ "\xCC\0 \xCD\0 \xCE\0 \xCF\0 "
+ "\xD0\0 \xD1\0 \xD2\0 \xD3\0 "
+ "\xD4\0 \xD5\0 \xD6\0 \xD7\0 "
+ "\xD8\0 \xD9\0 \xDA\0 \xDB\0 "
+ "\xDC\0 \xDD\0 \xDE\0 \xDF\0 "
+ "\xE0\0 \xE1\0 \xE2\0 \xE3\0 "
+ "\xE4\0 \xE5\0 \xE6\0 \xE7\0 "
+ "\xE8\0 \xE9\0 \xEA\0 \xEB\0 "
+ "\xEC\0 \xED\0 \xEE\0 \xEF\0 "
+ "\xF0\0 \xF1\0 \xF2\0 \xF3\0 "
+ "\xF4\0 \xF5\0 \xF6\0 \xF7\0 "
+ "\xF8\0 \xF9\0 \xFA\0 \xFB\0 "
+ "\xFC\0 \xFD\0 \xFE\0 \xFF\0 ";
JsonStringifier::JsonStringifier(Isolate* isolate)
: isolate_(isolate), builder_(isolate), gap_(nullptr), indent_(0) {
@@ -682,7 +682,7 @@ bool JsonStringifier::DoNotEscape(uint8_t c) {
template <>
bool JsonStringifier::DoNotEscape(uint16_t c) {
- return c >= '#' && c != '\\' && c != 0x7f;
+ return c >= '#' && c != '\\' && c != 0x7F;
}
void JsonStringifier::NewLine() {
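
The escape-table rewrite above is purely notational: every octal escape such as "\200" is replaced by the equivalent hex escape "\x80", so the bytes in the table do not change. A small check of that equivalence, assuming the usual 8-bit char:

#include <cassert>
#include <cstring>

int main() {
  // Same byte values, different spelling in the literal.
  static_assert('\012' == '\x0A', "LF");
  static_assert('\015' == '\x0D', "CR");
  static_assert('\177' == '\x7F', "DEL");
  // For values above 127, compare string literal bytes so the check does not
  // depend on whether plain char is signed.
  assert(std::memcmp("\200\377", "\x80\xFF", 2) == 0);
  return 0;
}
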
diff --git a/deps/v8/src/keys.cc b/deps/v8/src/keys.cc
index 9ac0079ac2..4f59c2553c 100644
--- a/deps/v8/src/keys.cc
+++ b/deps/v8/src/keys.cc
@@ -479,14 +479,11 @@ void FilterForEnumerableProperties(Handle<JSReceiver> receiver,
if (type == kIndexed) {
uint32_t number;
CHECK(element->ToUint32(&number));
- attributes = args.Call(
- v8::ToCData<v8::IndexedPropertyQueryCallback>(interceptor->query()),
- number);
+ attributes = args.CallIndexedQuery(interceptor, number);
} else {
CHECK(element->IsName());
- attributes = args.Call(v8::ToCData<v8::GenericNamedPropertyQueryCallback>(
- interceptor->query()),
- Handle<Name>::cast(element));
+ attributes =
+ args.CallNamedQuery(interceptor, Handle<Name>::cast(element));
}
if (!attributes.is_null()) {
@@ -512,20 +509,10 @@ Maybe<bool> CollectInterceptorKeysInternal(Handle<JSReceiver> receiver,
Handle<JSObject> result;
if (!interceptor->enumerator()->IsUndefined(isolate)) {
if (type == kIndexed) {
- v8::IndexedPropertyEnumeratorCallback enum_fun =
- v8::ToCData<v8::IndexedPropertyEnumeratorCallback>(
- interceptor->enumerator());
- const char* log_tag = "interceptor-indexed-enum";
- LOG(isolate, ApiObjectAccess(log_tag, *object));
- result = enum_args.Call(enum_fun);
+ result = enum_args.CallIndexedEnumerator(interceptor);
} else {
DCHECK_EQ(type, kNamed);
- v8::GenericNamedPropertyEnumeratorCallback enum_fun =
- v8::ToCData<v8::GenericNamedPropertyEnumeratorCallback>(
- interceptor->enumerator());
- const char* log_tag = "interceptor-named-enum";
- LOG(isolate, ApiObjectAccess(log_tag, *object));
- result = enum_args.Call(enum_fun);
+ result = enum_args.CallNamedEnumerator(interceptor);
}
}
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
@@ -790,7 +777,7 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
return Nothing<bool>();
}
// 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
- Handle<JSReceiver> target(proxy->target(), isolate_);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate_);
// 5. Let trap be ? GetMethod(handler, "ownKeys").
Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
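
The keys.cc hunks above fold the repeated pattern of "cast the raw callback out of the interceptor with ToCData, log, then invoke it" into named helpers on the call-arguments object (CallIndexedQuery, CallNamedQuery, CallIndexedEnumerator, CallNamedEnumerator). A rough standalone sketch of that shape; the Interceptor and PropertyCallbackArgs types below are invented for illustration and are not V8's classes:

#include <cstdint>
#include <functional>
#include <iostream>
#include <string>

// Stand-ins for the interceptor callbacks; in V8 these are C function
// pointers recovered from the interceptor object.
struct Interceptor {
  std::function<int(uint32_t)> indexed_query;
  std::function<int(const std::string&)> named_query;
};

class PropertyCallbackArgs {
 public:
  // Before: every call site extracted the callback and emitted its own log
  // line. After: the extraction and logging live in one named helper.
  int CallIndexedQuery(const Interceptor& interceptor, uint32_t index) {
    Log("interceptor-indexed-query");
    return interceptor.indexed_query(index);
  }
  int CallNamedQuery(const Interceptor& interceptor, const std::string& name) {
    Log("interceptor-named-query");
    return interceptor.named_query(name);
  }

 private:
  void Log(const char* tag) { std::cout << "[log] " << tag << "\n"; }
};

int main() {
  Interceptor interceptor{[](uint32_t) { return 1; },
                          [](const std::string&) { return 2; }};
  PropertyCallbackArgs args;
  std::cout << args.CallIndexedQuery(interceptor, 7) << "\n";
  std::cout << args.CallNamedQuery(interceptor, "x") << "\n";
}
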
diff --git a/deps/v8/src/label.h b/deps/v8/src/label.h
index 1dc8849812..eb93397518 100644
--- a/deps/v8/src/label.h
+++ b/deps/v8/src/label.h
@@ -18,7 +18,10 @@ namespace internal {
class Label {
public:
- enum Distance { kNear, kFar };
+ enum Distance {
+ kNear, // near jump: 8 bit displacement (signed)
+ kFar // far jump: 32 bit displacement (signed)
+ };
Label() = default;
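
The expanded Distance comments above distinguish near jumps, encoded with an 8 bit signed displacement, from far jumps with a 32 bit signed displacement (the x86/x64-style encodings this hint is meant for). The reachable ranges follow directly from the operand widths:

#include <cstdint>
#include <iostream>
#include <limits>

int main() {
  // An 8-bit signed displacement reaches [-128, +127] bytes from the jump;
  // a 32-bit signed displacement reaches roughly +/- 2 GiB.
  std::cout << "near: [" << +std::numeric_limits<int8_t>::min() << ", "
            << +std::numeric_limits<int8_t>::max() << "] bytes\n";
  std::cout << "far:  [" << std::numeric_limits<int32_t>::min() << ", "
            << std::numeric_limits<int32_t>::max() << "] bytes\n";
}
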
diff --git a/deps/v8/src/layout-descriptor-inl.h b/deps/v8/src/layout-descriptor-inl.h
index c75eea6fd8..93818ef710 100644
--- a/deps/v8/src/layout-descriptor-inl.h
+++ b/deps/v8/src/layout-descriptor-inl.h
@@ -67,10 +67,7 @@ LayoutDescriptor* LayoutDescriptor::SetTagged(int field_index, bool tagged) {
int layout_word_index = 0;
int layout_bit_index = 0;
- if (!GetIndexes(field_index, &layout_word_index, &layout_bit_index)) {
- CHECK(false);
- return this;
- }
+ CHECK(GetIndexes(field_index, &layout_word_index, &layout_bit_index));
uint32_t layout_mask = static_cast<uint32_t>(1) << layout_bit_index;
if (IsSlowLayout()) {
diff --git a/deps/v8/src/layout-descriptor.h b/deps/v8/src/layout-descriptor.h
index 7f8b311f3c..0e6869805c 100644
--- a/deps/v8/src/layout-descriptor.h
+++ b/deps/v8/src/layout-descriptor.h
@@ -7,7 +7,7 @@
#include <iosfwd>
-#include "src/objects.h"
+#include "src/objects/fixed-array.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index 833e37a290..39d9525eff 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -10,6 +10,7 @@
#include "include/libplatform/libplatform.h"
#include "src/base/debug/stack_trace.h"
#include "src/base/logging.h"
+#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
#include "src/base/sys-info.h"
@@ -84,6 +85,7 @@ DefaultPlatform::DefaultPlatform(
: thread_pool_size_(0),
idle_task_support_(idle_task_support),
tracing_controller_(std::move(tracing_controller)),
+ page_allocator_(new v8::base::PageAllocator()),
time_function_for_testing_(nullptr) {
if (!tracing_controller_) {
tracing::TracingController* controller = new tracing::TracingController();
@@ -254,5 +256,9 @@ Platform::StackTracePrinter DefaultPlatform::GetStackTracePrinter() {
return PrintStackTrace;
}
+v8::PageAllocator* DefaultPlatform::GetPageAllocator() {
+ return page_allocator_.get();
+}
+
} // namespace platform
} // namespace v8
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index 3280a7aa7c..b73f38a5fe 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -27,6 +27,7 @@ class Thread;
class WorkerThread;
class DefaultForegroundTaskRunner;
class DefaultBackgroundTaskRunner;
+class DefaultPageAllocator;
class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
public:
@@ -70,6 +71,7 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
double CurrentClockTimeMillis() override;
v8::TracingController* GetTracingController() override;
StackTracePrinter GetStackTracePrinter() override;
+ v8::PageAllocator* GetPageAllocator() override;
private:
static const int kMaxThreadPoolSize;
@@ -82,6 +84,7 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
foreground_task_runner_map_;
std::unique_ptr<TracingController> tracing_controller_;
+ std::unique_ptr<PageAllocator> page_allocator_;
TimeFunction time_function_for_testing_;
DISALLOW_COPY_AND_ASSIGN(DefaultPlatform);
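
Taken together, the default-platform changes give DefaultPlatform an owned v8::base::PageAllocator and expose it through the new GetPageAllocator() override, so callers obtain page-level allocation through the Platform interface rather than raw OS calls. A minimal sketch of that ownership pattern; the PageAllocator interface below is abbreviated for illustration and is not V8's exact API:

#include <cstddef>
#include <iostream>
#include <memory>

// Abbreviated stand-in for a page allocator interface.
class PageAllocator {
 public:
  virtual ~PageAllocator() = default;
  virtual size_t AllocatePageSize() const = 0;
};

class DefaultPageAllocator : public PageAllocator {
 public:
  size_t AllocatePageSize() const override { return 4096; }  // assumed size
};

class Platform {
 public:
  virtual ~Platform() = default;
  virtual PageAllocator* GetPageAllocator() = 0;
};

// The platform owns the allocator and hands out a non-owning pointer,
// mirroring the unique_ptr member added to DefaultPlatform above.
class DefaultPlatform : public Platform {
 public:
  DefaultPlatform() : page_allocator_(new DefaultPageAllocator()) {}
  PageAllocator* GetPageAllocator() override { return page_allocator_.get(); }

 private:
  std::unique_ptr<PageAllocator> page_allocator_;
};

int main() {
  DefaultPlatform platform;
  std::cout << platform.GetPageAllocator()->AllocatePageSize() << "\n";
}
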
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index b2eb44796d..938a84bffd 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -42,7 +42,6 @@ Log::Log(Logger* logger, const char* file_name)
if (FLAG_log_all) {
FLAG_log_api = true;
FLAG_log_code = true;
- FLAG_log_gc = true;
FLAG_log_suspect = true;
FLAG_log_handles = true;
FLAG_log_internal_timer_events = true;
@@ -152,7 +151,7 @@ void Log::MessageBuilder::AppendStringPart(String* str, int len) {
// TODO(cbruni): unify escaping.
for (int i = 0; i < len; i++) {
uc32 c = str->Get(i);
- if (c <= 0xff) {
+ if (c <= 0xFF) {
AppendCharacter(static_cast<char>(c));
} else {
// Escape any non-ascii range characters.
@@ -174,7 +173,7 @@ void Log::MessageBuilder::AppendCharacter(char c) {
if (c >= 32 && c <= 126) {
if (c == ',') {
// Escape commas (log field separator) directly.
- os << "\x2c";
+ os << "\\x2C";
} else {
// Directly append any printable ascii character.
os << c;
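
The one-character-looking change above actually changes behavior: "\x2c" is a string literal containing a comma, so the old code wrote the field separator itself into the log line, while "\\x2C" writes the four characters \x2C, i.e. an escaped comma. A tiny demonstration:

#include <iostream>
#include <string>

int main() {
  std::string old_form = "\x2c";   // one byte: ',' itself (not escaped at all)
  std::string new_form = "\\x2C";  // four bytes: backslash, 'x', '2', 'C'
  std::cout << old_form.size() << " -> " << old_form << "\n";  // 1 -> ,
  std::cout << new_form.size() << " -> " << new_form << "\n";  // 4 -> \x2C
}
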
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index 99ed03f34a..feb14ea1a0 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -30,9 +30,9 @@ class Log {
void stop() { is_stopped_ = true; }
static bool InitLogAtStart() {
- return FLAG_log || FLAG_log_api || FLAG_log_code || FLAG_log_gc ||
- FLAG_log_handles || FLAG_log_suspect || FLAG_ll_prof ||
- FLAG_perf_basic_prof || FLAG_perf_prof || FLAG_log_source_code ||
+ return FLAG_log || FLAG_log_api || FLAG_log_code || FLAG_log_handles ||
+ FLAG_log_suspect || FLAG_ll_prof || FLAG_perf_basic_prof ||
+ FLAG_perf_prof || FLAG_log_source_code ||
FLAG_log_internal_timer_events || FLAG_prof_cpp || FLAG_trace_ic ||
FLAG_log_function_events;
}
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index b529df7bbe..f5d5be6848 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -1049,27 +1049,25 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
// Make sure the script is written to the log file.
Script* script = Script::cast(script_object);
int script_id = script->id();
- if (logged_source_code_.find(script_id) != logged_source_code_.end()) {
- return;
- }
-
- // This script has not been logged yet.
- logged_source_code_.insert(script_id);
- Object* source_object = script->source();
- if (source_object->IsString()) {
- String* source_code = String::cast(source_object);
- msg << "script" << kNext << script_id << kNext;
+ if (logged_source_code_.find(script_id) == logged_source_code_.end()) {
+ // This script has not been logged yet.
+ logged_source_code_.insert(script_id);
+ Object* source_object = script->source();
+ if (source_object->IsString()) {
+ String* source_code = String::cast(source_object);
+ msg << "script" << kNext << script_id << kNext;
+
+ // Log the script name.
+ if (script->name()->IsString()) {
+ msg << String::cast(script->name()) << kNext;
+ } else {
+ msg << "<unknown>" << kNext;
+ }
- // Log the script name.
- if (script->name()->IsString()) {
- msg << String::cast(script->name()) << kNext;
- } else {
- msg << "<unknown>" << kNext;
+ // Log the source code.
+ msg << source_code;
+ msg.WriteToLogFile();
}
-
- // Log the source code.
- msg << source_code;
- msg.WriteToLogFile();
}
// We log source code information in the form:
@@ -1294,34 +1292,6 @@ void Logger::FunctionEvent(const char* reason, Script* script, int script_id,
msg.WriteToLogFile();
}
-void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- Log::MessageBuilder msg(log_);
- // Using non-relative system time in order to be able to synchronize with
- // external memory profiling events (e.g. DOM memory size).
- msg << "heap-sample-begin" << kNext << space << kNext << kind << kNext;
- msg.Append("%.0f", V8::GetCurrentPlatform()->CurrentClockTimeMillis());
- msg.WriteToLogFile();
-}
-
-
-void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- Log::MessageBuilder msg(log_);
- msg << "heap-sample-end" << kNext << space << kNext << kind;
- msg.WriteToLogFile();
-}
-
-
-void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- Log::MessageBuilder msg(log_);
- msg << "heap-sample-item" << kNext << type << kNext << number << kNext
- << bytes;
- msg.WriteToLogFile();
-}
-
-
void Logger::RuntimeCallTimerEvent() {
RuntimeCallStats* stats = isolate_->counters()->runtime_call_stats();
RuntimeCallCounter* counter = stats->current_counter();
@@ -1389,6 +1359,7 @@ void Logger::MapEvent(const char* type, Map* from, Map* to, const char* reason,
int line = -1;
int column = -1;
Address pc = 0;
+
if (!isolate_->bootstrapper()->IsActive()) {
pc = isolate_->GetAbstractPC(&line, &column);
}
@@ -1412,6 +1383,15 @@ void Logger::MapEvent(const char* type, Map* from, Map* to, const char* reason,
msg.WriteToLogFile();
}
+void Logger::MapCreate(Map* map) {
+ if (!log_->IsEnabled() || !FLAG_trace_maps) return;
+ DisallowHeapAllocation no_gc;
+ Log::MessageBuilder msg(log_);
+ msg << "map-create" << kNext << timer_.Elapsed().InMicroseconds() << kNext
+ << reinterpret_cast<void*>(map);
+ msg.WriteToLogFile();
+}
+
void Logger::MapDetails(Map* map) {
if (!log_->IsEnabled() || !FLAG_trace_maps) return;
// Disable logging Map details during bootstrapping since we use LogMaps() to
@@ -1421,9 +1401,11 @@ void Logger::MapDetails(Map* map) {
Log::MessageBuilder msg(log_);
msg << "map-details" << kNext << timer_.Elapsed().InMicroseconds() << kNext
<< reinterpret_cast<void*>(map) << kNext;
- std::ostringstream buffer;
- map->PrintMapDetails(buffer);
- msg << buffer.str().c_str();
+ if (FLAG_trace_maps_details) {
+ std::ostringstream buffer;
+ map->PrintMapDetails(buffer);
+ msg << buffer.str().c_str();
+ }
msg.WriteToLogFile();
}
@@ -1575,6 +1557,16 @@ void Logger::LogCodeObjects() {
}
}
+void Logger::LogBytecodeHandler(interpreter::Bytecode bytecode,
+ interpreter::OperandScale operand_scale,
+ Code* code) {
+ std::string bytecode_name =
+ interpreter::Bytecodes::ToString(bytecode, operand_scale);
+ PROFILE(isolate_,
+ CodeCreateEvent(CodeEventListener::BYTECODE_HANDLER_TAG,
+ AbstractCode::cast(code), bytecode_name.c_str()));
+}
+
void Logger::LogBytecodeHandlers() {
const interpreter::OperandScale kOperandScales[] = {
#define VALUE(Name, _) interpreter::OperandScale::k##Name,
@@ -1590,11 +1582,7 @@ void Logger::LogBytecodeHandlers() {
if (interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
Code* code = interpreter->GetBytecodeHandler(bytecode, operand_scale);
if (isolate_->heap()->IsDeserializeLazyHandler(code)) continue;
- std::string bytecode_name =
- interpreter::Bytecodes::ToString(bytecode, operand_scale);
- PROFILE(isolate_, CodeCreateEvent(
- CodeEventListener::BYTECODE_HANDLER_TAG,
- AbstractCode::cast(code), bytecode_name.c_str()));
+ LogBytecodeHandler(bytecode, operand_scale, code);
}
}
}
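
The new map-create record above is assembled like the other log events: fields are pushed through operator<< and joined by the kNext separator. A rough sketch of what one such record looks like, using a plain ostringstream in place of Log::MessageBuilder; the comma separator and the field order are assumptions for illustration:

#include <cstdint>
#include <iostream>
#include <sstream>

int main() {
  const char kNext = ',';  // assumed field separator, matching the comma
                           // escaping seen in Log::MessageBuilder above
  int64_t elapsed_us = 12345;
  const void* map = reinterpret_cast<const void*>(0xdeadbeef);

  std::ostringstream msg;
  msg << "map-create" << kNext << elapsed_us << kNext << map;
  std::cout << msg.str() << "\n";  // e.g. map-create,12345,0xdeadbeef
}
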
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 7efa50b8de..8305eb1001 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -37,7 +37,7 @@ namespace internal {
//
// --log-all
// Log all events to the file, default is off. This is the same as combining
-// --log-api, --log-code, --log-gc, and --log-regexp.
+// --log-api, --log-code, and --log-regexp.
//
// --log-api
// Log API events to the logfile, default is off. --log-api implies --log.
@@ -46,10 +46,6 @@ namespace internal {
// Log code (create, move, and delete) events to the logfile, default is off.
// --log-code implies --log.
//
-// --log-gc
-// Log GC heap samples after each GC that can be processed by hp2ps, default
-// is off. --log-gc implies --log.
-//
// --log-regexp
// Log creation and use of regular expressions, Default is off.
// --log-regexp implies --log.
@@ -205,21 +201,9 @@ class Logger : public CodeEventListener {
void MapEvent(const char* type, Map* from, Map* to,
const char* reason = nullptr,
HeapObject* name_or_sfi = nullptr);
+ void MapCreate(Map* map);
void MapDetails(Map* map);
- // ==== Events logged by --log-gc. ====
- // Heap sampling events: start, end, and individual types.
- void HeapSampleBeginEvent(const char* space, const char* kind);
- void HeapSampleEndEvent(const char* space, const char* kind);
- void HeapSampleItemEvent(const char* type, int number, int bytes);
- void HeapSampleJSConstructorEvent(const char* constructor,
- int number, int bytes);
- void HeapSampleJSRetainersEvent(const char* constructor,
- const char* event);
- void HeapSampleJSProducerEvent(const char* constructor,
- Address* stack);
- void HeapSampleStats(const char* space, const char* kind,
- intptr_t capacity, intptr_t used);
void SharedLibraryEvent(const std::string& library_path, uintptr_t start,
uintptr_t end, intptr_t aslr_slide);
@@ -258,6 +242,8 @@ class Logger : public CodeEventListener {
void LogCodeObjects();
// Used for logging bytecode handlers found in the snapshot.
void LogBytecodeHandlers();
+ void LogBytecodeHandler(interpreter::Bytecode bytecode,
+ interpreter::OperandScale operand_scale, Code* code);
// Logs all Maps found in the heap.
void LogMaps();
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 2d3cc3253e..71902dff84 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -211,7 +211,7 @@ Handle<JSReceiver> LookupIterator::GetRootForNonJSReceiver(
auto root =
handle(receiver->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
if (root->IsNull(isolate)) {
- unsigned int magic = 0xbbbbbbbb;
+ unsigned int magic = 0xBBBBBBBB;
isolate->PushStackTraceAndDie(magic, *receiver, nullptr, magic);
}
return Handle<JSReceiver>::cast(root);
@@ -237,22 +237,45 @@ void LookupIterator::ReloadPropertyInformation() {
DCHECK(IsFound() || !holder_->HasFastProperties());
}
+namespace {
+bool IsTypedArrayFunctionInAnyContext(Isolate* isolate, JSReceiver* holder) {
+ static uint32_t context_slots[] = {
+#define TYPED_ARRAY_CONTEXT_SLOTS(Type, type, TYPE, ctype, size) \
+ Context::TYPE##_ARRAY_FUN_INDEX,
+
+ TYPED_ARRAYS(TYPED_ARRAY_CONTEXT_SLOTS)
+#undef TYPED_ARRAY_CONTEXT_SLOTS
+ };
+
+ if (!holder->IsJSFunction()) return false;
+
+ return std::any_of(
+ std::begin(context_slots), std::end(context_slots),
+ [=](uint32_t slot) { return isolate->IsInAnyContext(holder, slot); });
+}
+} // namespace
+
void LookupIterator::InternalUpdateProtector() {
if (isolate_->bootstrapper()->IsActive()) return;
if (*name_ == heap()->constructor_string()) {
if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
// Setting the constructor property could change an instance's @@species
- if (holder_->IsJSArray()) {
+ if (holder_->IsJSArray() || holder_->IsJSTypedArray()) {
isolate_->CountUsage(
v8::Isolate::UseCounterFeature::kArrayInstanceConstructorModified);
isolate_->InvalidateArraySpeciesProtector();
} else if (holder_->map()->is_prototype_map()) {
DisallowHeapAllocation no_gc;
- // Setting the constructor of Array.prototype of any realm also needs
- // to invalidate the species protector
+ // Setting the constructor of Array.prototype or %TypedArray%.prototype of
+ // any realm also needs to invalidate the species protector.
+      // For typed arrays, we check the prototype of this holder, since typed
+      // arrays have a different prototype for each element type, and those
+      // prototypes all share the same parent, TYPED_ARRAY_PROTOTYPE.
if (isolate_->IsInAnyContext(*holder_,
- Context::INITIAL_ARRAY_PROTOTYPE_INDEX)) {
+ Context::INITIAL_ARRAY_PROTOTYPE_INDEX) ||
+ isolate_->IsInAnyContext(holder_->map()->prototype(),
+ Context::TYPED_ARRAY_PROTOTYPE_INDEX)) {
isolate_->CountUsage(v8::Isolate::UseCounterFeature::
kArrayPrototypeConstructorModified);
isolate_->InvalidateArraySpeciesProtector();
@@ -260,9 +283,10 @@ void LookupIterator::InternalUpdateProtector() {
}
} else if (*name_ == heap()->species_symbol()) {
if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
- // Setting the Symbol.species property of any Array constructor invalidates
- // the species protector
- if (isolate_->IsInAnyContext(*holder_, Context::ARRAY_FUNCTION_INDEX)) {
+ // Setting the Symbol.species property of any Array or TypedArray
+ // constructor invalidates the species protector
+ if (isolate_->IsInAnyContext(*holder_, Context::ARRAY_FUNCTION_INDEX) ||
+ IsTypedArrayFunctionInAnyContext(isolate_, *holder_)) {
isolate_->CountUsage(
v8::Isolate::UseCounterFeature::kArraySpeciesModified);
isolate_->InvalidateArraySpeciesProtector();
@@ -479,6 +503,7 @@ void LookupIterator::ApplyTransitionToDataProperty(Handle<JSObject> receiver) {
DCHECK(receiver.is_identical_to(GetStoreTarget()));
holder_ = receiver;
if (receiver->IsJSGlobalObject()) {
+ JSObject::InvalidatePrototypeChains(receiver->map());
state_ = DATA;
return;
}
@@ -495,6 +520,9 @@ void LookupIterator::ApplyTransitionToDataProperty(Handle<JSObject> receiver) {
Handle<NameDictionary> dictionary(receiver->property_dictionary(),
isolate_);
int entry;
+ if (receiver->map()->is_prototype_map()) {
+ JSObject::InvalidatePrototypeChains(receiver->map());
+ }
dictionary = NameDictionary::Add(dictionary, name(),
isolate_->factory()->uninitialized_value(),
property_details_, &entry);
@@ -521,8 +549,8 @@ void LookupIterator::Delete() {
bool is_prototype_map = holder->map()->is_prototype_map();
RuntimeCallTimerScope stats_scope(
isolate_, is_prototype_map
- ? &RuntimeCallStats::PrototypeObject_DeleteProperty
- : &RuntimeCallStats::Object_DeleteProperty);
+ ? RuntimeCallCounterId::kPrototypeObject_DeleteProperty
+ : RuntimeCallCounterId::kObject_DeleteProperty);
PropertyNormalizationMode mode =
is_prototype_map ? KEEP_INOBJECT_PROPERTIES : CLEAR_INOBJECT_PROPERTIES;
@@ -638,9 +666,12 @@ void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
ReloadPropertyInformation<true>();
} else {
- PropertyNormalizationMode mode = receiver->map()->is_prototype_map()
- ? KEEP_INOBJECT_PROPERTIES
- : CLEAR_INOBJECT_PROPERTIES;
+ PropertyNormalizationMode mode = CLEAR_INOBJECT_PROPERTIES;
+ if (receiver->map()->is_prototype_map()) {
+ JSObject::InvalidatePrototypeChains(receiver->map());
+ mode = KEEP_INOBJECT_PROPERTIES;
+ }
+
// Normalize object to make this operation simple.
JSObject::NormalizeProperties(receiver, mode, 0,
"TransitionToAccessorPair");
diff --git a/deps/v8/src/machine-type.h b/deps/v8/src/machine-type.h
index 4502b2fdc2..63e3c7a462 100644
--- a/deps/v8/src/machine-type.h
+++ b/deps/v8/src/machine-type.h
@@ -39,7 +39,7 @@ static_assert(static_cast<int>(MachineRepresentation::kLastRepresentation) <
kIntSize * kBitsPerByte,
"Bit masks of MachineRepresentation should fit in an int");
-const char* MachineReprToString(MachineRepresentation);
+V8_EXPORT_PRIVATE const char* MachineReprToString(MachineRepresentation);
enum class MachineSemantic : uint8_t {
kNone,
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index e9d2be1843..5876e5f5e4 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -12,7 +12,7 @@
#include "src/keys.h"
#include "src/objects/frame-array-inl.h"
#include "src/string-builder.h"
-#include "src/wasm/wasm-heap.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -189,11 +189,10 @@ std::unique_ptr<char[]> MessageHandler::GetLocalizedMessage(
namespace {
Object* EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
- if (script->eval_from_shared()->IsUndefined(isolate))
+ if (!script->has_eval_from_shared())
return isolate->heap()->undefined_value();
- Handle<SharedFunctionInfo> shared(
- SharedFunctionInfo::cast(script->eval_from_shared()));
+ Handle<SharedFunctionInfo> shared(script->eval_from_shared());
// Find the name of the function calling eval.
if (shared->name()->BooleanValue()) {
return shared->name();
@@ -203,11 +202,10 @@ Object* EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
}
Object* EvalFromScript(Isolate* isolate, Handle<Script> script) {
- if (script->eval_from_shared()->IsUndefined(isolate))
+ if (!script->has_eval_from_shared())
return isolate->heap()->undefined_value();
- Handle<SharedFunctionInfo> eval_from_shared(
- SharedFunctionInfo::cast(script->eval_from_shared()));
+ Handle<SharedFunctionInfo> eval_from_shared(script->eval_from_shared());
return eval_from_shared->script()->IsScript()
? eval_from_shared->script()
: isolate->heap()->undefined_value();
@@ -674,10 +672,10 @@ Handle<Object> WasmStackFrame::GetFunction() const {
Handle<Object> WasmStackFrame::GetFunctionName() {
Handle<Object> name;
- Handle<WasmCompiledModule> compiled_module(wasm_instance_->compiled_module(),
- isolate_);
- if (!WasmCompiledModule::GetFunctionNameOrNull(isolate_, compiled_module,
- wasm_func_index_)
+ Handle<WasmSharedModuleData> shared(
+ wasm_instance_->compiled_module()->shared(), isolate_);
+ if (!WasmSharedModuleData::GetFunctionNameOrNull(isolate_, shared,
+ wasm_func_index_)
.ToHandle(&name)) {
name = isolate_->factory()->null_value();
}
@@ -687,12 +685,13 @@ Handle<Object> WasmStackFrame::GetFunctionName() {
MaybeHandle<String> WasmStackFrame::ToString() {
IncrementalStringBuilder builder(isolate_);
- Handle<WasmCompiledModule> compiled_module(wasm_instance_->compiled_module(),
- isolate_);
+ Handle<WasmSharedModuleData> shared(
+ wasm_instance_->compiled_module()->shared(), isolate_);
MaybeHandle<String> module_name =
- WasmCompiledModule::GetModuleNameOrNull(isolate_, compiled_module);
- MaybeHandle<String> function_name = WasmCompiledModule::GetFunctionNameOrNull(
- isolate_, compiled_module, wasm_func_index_);
+ WasmSharedModuleData::GetModuleNameOrNull(isolate_, shared);
+ MaybeHandle<String> function_name =
+ WasmSharedModuleData::GetFunctionNameOrNull(isolate_, shared,
+ wasm_func_index_);
bool has_name = !module_name.is_null() || !function_name.is_null();
if (has_name) {
if (module_name.is_null()) {
@@ -738,7 +737,8 @@ Handle<Object> WasmStackFrame::Null() const {
bool WasmStackFrame::HasScript() const { return true; }
Handle<Script> WasmStackFrame::GetScript() const {
- return handle(wasm_instance_->compiled_module()->script(), isolate_);
+ return handle(wasm_instance_->compiled_module()->shared()->script(),
+ isolate_);
}
AsmJsWasmStackFrame::AsmJsWasmStackFrame() {}
@@ -762,13 +762,15 @@ Handle<Object> AsmJsWasmStackFrame::GetFunction() const {
}
Handle<Object> AsmJsWasmStackFrame::GetFileName() {
- Handle<Script> script(wasm_instance_->compiled_module()->script(), isolate_);
+ Handle<Script> script(wasm_instance_->compiled_module()->shared()->script(),
+ isolate_);
DCHECK(script->IsUserJavaScript());
return handle(script->name(), isolate_);
}
Handle<Object> AsmJsWasmStackFrame::GetScriptNameOrSourceUrl() {
- Handle<Script> script(wasm_instance_->compiled_module()->script(), isolate_);
+ Handle<Script> script(wasm_instance_->compiled_module()->shared()->script(),
+ isolate_);
DCHECK_EQ(Script::TYPE_NORMAL, script->type());
return ScriptNameOrSourceUrl(script, isolate_);
}
@@ -780,24 +782,26 @@ int AsmJsWasmStackFrame::GetPosition() const {
? Handle<AbstractCode>::cast(code_.GetCode())->SourcePosition(offset_)
: FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
code_.GetWasmCode(), offset_);
- Handle<WasmCompiledModule> compiled_module(wasm_instance_->compiled_module(),
- isolate_);
+ Handle<WasmSharedModuleData> shared(
+ wasm_instance_->compiled_module()->shared(), isolate_);
DCHECK_LE(0, byte_offset);
- return WasmCompiledModule::GetSourcePosition(
- compiled_module, wasm_func_index_, static_cast<uint32_t>(byte_offset),
+ return WasmSharedModuleData::GetSourcePosition(
+ shared, wasm_func_index_, static_cast<uint32_t>(byte_offset),
is_at_number_conversion_);
}
int AsmJsWasmStackFrame::GetLineNumber() {
DCHECK_LE(0, GetPosition());
- Handle<Script> script(wasm_instance_->compiled_module()->script(), isolate_);
+ Handle<Script> script(wasm_instance_->compiled_module()->shared()->script(),
+ isolate_);
DCHECK(script->IsUserJavaScript());
return Script::GetLineNumber(script, GetPosition()) + 1;
}
int AsmJsWasmStackFrame::GetColumnNumber() {
DCHECK_LE(0, GetPosition());
- Handle<Script> script(wasm_instance_->compiled_module()->script(), isolate_);
+ Handle<Script> script(wasm_instance_->compiled_module()->shared()->script(),
+ isolate_);
DCHECK(script->IsUserJavaScript());
return Script::GetColumnNumber(script, GetPosition()) + 1;
}
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index bf0c8db355..923535517a 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -267,6 +267,8 @@ class ErrorUtils : public AllStatic {
T(ApplyNonFunction, \
"Function.prototype.apply was called on %, which is a % and not a " \
"function") \
+ T(ArgumentsDisallowedInInitializer, \
+ "'arguments' is not allowed in class field initializer") \
T(ArrayBufferTooShort, \
"Derived ArrayBuffer constructor created a buffer which was too small") \
T(ArrayBufferSpeciesThis, \
@@ -308,7 +310,7 @@ class ErrorUtils : public AllStatic {
T(ConstructorNotFunction, "Constructor % requires 'new'") \
T(ConstructorNotReceiver, "The .constructor property is not an object") \
T(CurrencyCode, "Currency code is required with currency style.") \
- T(CyclicModuleDependency, "Detected cycle while resolving name '%'") \
+ T(CyclicModuleDependency, "Detected cycle while resolving name '%' in '%'") \
T(DataViewNotArrayBuffer, \
"First argument to DataView constructor must be an ArrayBuffer") \
T(DateType, "this is not a Date object.") \
@@ -520,6 +522,7 @@ class ErrorUtils : public AllStatic {
T(UnsupportedSuper, "Unsupported reference to 'super'") \
/* RangeError */ \
T(BigIntDivZero, "Division by zero") \
+ T(BigIntNegativeExponent, "Exponent must be positive") \
T(BigIntTooBig, "Maximum BigInt size exceeded") \
T(DateRange, "Provided date is not in valid range.") \
T(ExpectedTimezoneID, \
@@ -563,7 +566,7 @@ class ErrorUtils : public AllStatic {
T(ValueOutOfRange, "Value % out of range for % options property %") \
/* SyntaxError */ \
T(AmbiguousExport, \
- "The requested module contains conflicting star exports for name '%'") \
+ "The requested module '%' contains conflicting star exports for name '%'") \
T(BadGetterArity, "Getter must not have any formal parameters.") \
T(BadSetterArity, "Setter must have exactly one formal parameter.") \
T(BigIntInvalidString, "Invalid BigInt string") \
@@ -698,7 +701,7 @@ class ErrorUtils : public AllStatic {
"Lexical declaration cannot appear in a single-statement context") \
T(UnknownLabel, "Undefined label '%'") \
T(UnresolvableExport, \
- "The requested module does not provide an export named '%'") \
+ "The requested module '%' does not provide an export named '%'") \
T(UnterminatedArgList, "missing ) after argument list") \
T(UnterminatedRegExp, "Invalid regular expression: missing /") \
T(UnterminatedTemplate, "Unterminated template literal") \
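
Several templates above gain an extra '%' placeholder (for example the module specifier in AmbiguousExport, CyclicModuleDependency and UnresolvableExport); each '%' is filled positionally from the arguments supplied when the error is formatted. A toy formatter showing that positional substitution, not V8's MessageTemplate code:

#include <iostream>
#include <string>
#include <vector>

// Replace each '%' in the template with the next argument, left to right.
std::string FormatMessage(const std::string& tmpl,
                          const std::vector<std::string>& args) {
  std::string out;
  size_t next_arg = 0;
  for (char c : tmpl) {
    if (c == '%' && next_arg < args.size()) {
      out += args[next_arg++];
    } else {
      out += c;
    }
  }
  return out;
}

int main() {
  std::cout << FormatMessage(
                   "The requested module '%' does not provide an export "
                   "named '%'",
                   {"./lib.js", "foo"})
            << "\n";
}
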
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index e42210ea0e..803c16b829 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -77,15 +77,13 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
+ return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
- DCHECK(IsCodeTarget(rmode_) ||
- IsRuntimeEntry(rmode_) ||
- rmode_ == EMBEDDED_OBJECT ||
- rmode_ == EXTERNAL_REFERENCE);
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
+ rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
// Read the address of the word containing the target_address in an
// instruction stream.
// The only architecture-independent user of this function is the serializer.
@@ -123,18 +121,6 @@ int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
}
-Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- return target_address_at(pc, constant_pool);
-}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
-}
-
Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
}
@@ -146,12 +132,12 @@ void Assembler::deserialization_set_special_target_at(
set_target_address_at(
isolate,
instruction_payload - (kInstructionsFor32BitConstant - 1) * kInstrSize,
- code, target);
+ code ? code->constant_pool() : nullptr, target);
} else {
set_target_address_at(
isolate,
- instruction_payload - kInstructionsFor32BitConstant * kInstrSize, code,
- target);
+ instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
+ code ? code->constant_pool() : nullptr, target);
}
}
@@ -200,21 +186,21 @@ void Assembler::deserialization_set_target_internal_reference_at(
HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(
- reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
+ return HeapObject::cast(reinterpret_cast<Object*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<HeapObject>(
- reinterpret_cast<HeapObject**>(Assembler::target_address_at(pc_, host_)));
+ return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
+ Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -227,7 +213,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -281,7 +267,7 @@ void RelocInfo::WipeOut(Isolate* isolate) {
} else if (IsInternalReferenceEncoded(rmode_)) {
Assembler::set_target_internal_reference_encoded_at(pc_, nullptr);
} else {
- Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
}
}
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index b5719a3add..bd540346c0 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -192,21 +192,23 @@ bool RelocInfo::IsInConstantPool() {
}
Address RelocInfo::embedded_address() const {
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
uint32_t RelocInfo::embedded_size() const {
- return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
+ return reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_, constant_pool_));
}
void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
+ flush_mode);
}
void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
@@ -308,7 +310,7 @@ const Instr kSwRegFpNegOffsetPattern =
SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
-const Instr kLwSwInstrTypeMask = 0xffe00000;
+const Instr kLwSwInstrTypeMask = 0xFFE00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
@@ -788,7 +790,7 @@ uint32_t Assembler::CreateTargetAddress(Instr instr_lui, Instr instr_jic) {
// Use just lui and jic instructions. Insert lower part of the target address in
// jic offset part. Since jic sign-extends offset and then add it with register,
// before that addition, difference between upper part of the target address and
-// upper part of the sign-extended offset (0xffff or 0x0000), will be inserted
+// upper part of the sign-extended offset (0xFFFF or 0x0000), will be inserted
// in jic register with lui instruction.
void Assembler::UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
int16_t& jic_offset) {
@@ -2001,7 +2003,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
// about -64KB to about +64KB, allowing further addition of 4 when accessing
// 64-bit variables with two 32-bit accesses.
constexpr int32_t kMinOffsetForSimpleAdjustment =
- 0x7ff8; // Max int16_t that's a multiple of 8.
+ 0x7FF8; // Max int16_t that's a multiple of 8.
constexpr int32_t kMaxOffsetForSimpleAdjustment =
2 * kMinOffsetForSimpleAdjustment;
if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) {
@@ -2237,7 +2239,7 @@ void Assembler::aluipc(Register rs, int16_t imm16) {
// Break / Trap instructions.
void Assembler::break_(uint32_t code, bool break_as_stop) {
- DCHECK_EQ(code & ~0xfffff, 0);
+ DCHECK_EQ(code & ~0xFFFFF, 0);
// We need to invalidate breaks that could be stops as well because the
// simulator expects a char pointer after the stop instruction.
// See constants-mips.h for explanation.
@@ -2494,7 +2496,7 @@ void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
memcpy(&i, &d, 8);
- *lo = i & 0xffffffff;
+ *lo = i & 0xFFFFFFFF;
*hi = i >> 32;
}
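
The UnpackTargetAddress comment above describes why the lui half must be biased: jic sign-extends its 16-bit offset before adding it, so when the low half has its sign bit set the upper half has to absorb the resulting -0x10000. A self-contained sketch of that compensation, shown as generic %hi/%lo splitting rather than V8's exact implementation:

#include <cassert>
#include <cstdint>
#include <iostream>

// Split a 32-bit address so that (upper << 16) + sign_extend(lower)
// reconstructs it, mirroring the lui/jic pairing described above.
void Split(uint32_t address, uint32_t* lui_part, int16_t* offset) {
  *offset = static_cast<int16_t>(address & 0xFFFF);  // sign-extended by jic
  // If the low half is negative, the addition subtracts 0x10000, so the
  // upper half is biased by one to compensate.
  *lui_part = (address >> 16) + ((address & 0x8000) ? 1 : 0);
}

int main() {
  for (uint32_t address :
       {0x12345678u, 0x1234FFF0u, 0x00008000u, 0xFFFF7FFFu}) {
    uint32_t upper;
    int16_t lower;
    Split(address, &upper, &lower);
    uint32_t rebuilt =
        (upper << 16) + static_cast<uint32_t>(static_cast<int32_t>(lower));
    assert(rebuilt == address);
    std::cout << std::hex << "0x" << address << " -> lui 0x" << upper
              << std::dec << ", offset " << lower << "\n";
  }
}
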
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 76f3245e2c..4c68e730b3 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -238,6 +238,7 @@ int ToNumber(Register reg);
Register ToRegister(int num);
+constexpr bool kPadArguments = false;
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
@@ -581,10 +582,6 @@ class Assembler : public AssemblerBase {
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
set_target_address_at(isolate, pc, target, icache_flush_mode);
}
- INLINE(static Address target_address_at(Address pc, Code* code));
- INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
static void set_target_value_at(
Isolate* isolate, Address pc, uint32_t target,
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 3485e146ea..7ae3451f34 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -238,7 +238,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// (happens only when input is MIN_INT).
__ Branch(&bail_out, gt, zero_reg, Operand(scratch));
__ bind(&positive_exponent);
- __ Assert(ge, kUnexpectedNegativeValue, scratch, Operand(zero_reg));
+ __ Assert(ge, AbortReason::kUnexpectedNegativeValue, scratch,
+ Operand(zero_reg));
Label while_true, no_carry, loop_end;
__ bind(&while_true);
@@ -527,7 +528,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// caller fp |
// function slot | entry frame
// context slot |
- // bad fp (0xff...f) |
+ // bad fp (0xFF...F) |
// callee saved registers + ra
// 4 args slots
// args
@@ -589,13 +590,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// callee saved registers + ra
// 4 args slots
// args
-
- if (type() == StackFrame::CONSTRUCT_ENTRY) {
- __ Call(BUILTIN_CODE(isolate, JSConstructEntryTrampoline),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(BUILTIN_CODE(isolate, JSEntryTrampoline), RelocInfo::CODE_TARGET);
- }
+ __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@@ -646,8 +641,8 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
// filled with kZapValue by the GC.
// Dereference the address and check for this.
__ lw(t0, MemOperand(t9));
- __ Assert(ne, kReceivedInvalidReturnAddress, t0,
- Operand(reinterpret_cast<uint32_t>(kZapValue)));
+ __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, t0,
+ Operand(reinterpret_cast<uint32_t>(kZapValue)));
}
__ Jump(t9);
}
@@ -761,7 +756,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -803,7 +798,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
if (FLAG_debug_code) {
__ lw(t1, FieldMemOperand(a2, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
+ __ Assert(eq, AbortReason::kExpectedAllocationSite, t1, Operand(at));
}
// Save the resulting elements kind in type info. We can't just store a3
@@ -826,7 +821,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -901,11 +896,11 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ SmiTst(t0, at);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
- at, Operand(zero_reg));
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at,
+ Operand(zero_reg));
__ GetObjectType(t0, t0, t1);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
- t1, Operand(MAP_TYPE));
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, t1,
+ Operand(MAP_TYPE));
// We should either have undefined in a2 or a valid AllocationSite
__ AssertUndefinedOrAllocationSite(a2, t0);
@@ -983,11 +978,11 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ SmiTst(a3, at);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
- at, Operand(zero_reg));
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at,
+ Operand(zero_reg));
__ GetObjectType(a3, a3, t0);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
- t0, Operand(MAP_TYPE));
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, t0,
+ Operand(MAP_TYPE));
}
// Figure out the right elements kind.
@@ -1002,8 +997,10 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label done;
__ Branch(&done, eq, a3, Operand(PACKED_ELEMENTS));
- __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray, a3,
- Operand(HOLEY_ELEMENTS));
+ __ Assert(
+ eq,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray,
+ a3, Operand(HOLEY_ELEMENTS));
__ bind(&done);
}
@@ -1103,7 +1100,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ sw(s0, MemOperand(s3, kNextOffset));
if (__ emit_debug_code()) {
__ lw(a1, MemOperand(s3, kLevelOffset));
- __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
+ Operand(s2));
}
__ Subu(s2, s2, Operand(1));
__ sw(s2, MemOperand(s3, kLevelOffset));
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 6205bcd202..c07422ff5f 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -24,8 +24,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
return stub;
#else
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -97,7 +96,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// copied and a3 to the dst pointer after all the 64 byte chunks have been
// copied. We will loop, incrementing a0 and a1 until a0 equals a3.
__ bind(&aligned);
- __ andi(t8, a2, 0x3f);
+ __ andi(t8, a2, 0x3F);
__ beq(a2, t8, &chkw); // Less than 64?
__ subu(a3, a2, t8); // In delay slot.
__ addu(a3, a0, a3); // Now a3 is the final dst after loop.
@@ -180,7 +179,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// down to chk1w to handle the tail end of the copy.
__ bind(&chkw);
__ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
- __ andi(t8, a2, 0x1f);
+ __ andi(t8, a2, 0x1F);
__ beq(a2, t8, &chk1w); // Less than 32?
__ nop(); // In delay slot.
__ lw(t0, MemOperand(a1));
@@ -264,7 +263,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// the dst pointer after all the 64 byte chunks have been copied. We will
// loop, incrementing a0 and a1 until a0 equals a3.
__ bind(&ua_chk16w);
- __ andi(t8, a2, 0x3f);
+ __ andi(t8, a2, 0x3F);
__ beq(a2, t8, &ua_chkw);
__ subu(a3, a2, t8); // In delay slot.
__ addu(a3, a0, a3);
@@ -436,7 +435,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// ua_chk1w to handle the tail end of the copy.
__ bind(&ua_chkw);
__ Pref(pref_hint_load, MemOperand(a1));
- __ andi(t8, a2, 0x1f);
+ __ andi(t8, a2, 0x1F);
__ beq(a2, t8, &ua_chk1w);
__ nop(); // In delay slot.
@@ -545,8 +544,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@@ -557,8 +555,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -574,8 +571,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index 420453aad0..f27bdc9b68 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -421,7 +421,7 @@ void Decoder::PrintPCImm21(Instruction* instr, int delta_pc, int n_bits) {
void Decoder::PrintXImm26(Instruction* instr) {
uint32_t target = static_cast<uint32_t>(instr->Imm26Value())
<< kImmFieldShift;
- target = (reinterpret_cast<uint32_t>(instr) & ~0xfffffff) | target;
+ target = (reinterpret_cast<uint32_t>(instr) & ~0xFFFFFFF) | target;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", target);
}
@@ -456,7 +456,7 @@ void Decoder::PrintPCImm26(Instruction* instr, int delta_pc, int n_bits) {
// PC[GPRLEN-1 .. 28] || instr_index26 || 00
void Decoder::PrintPCImm26(Instruction* instr) {
int32_t imm26 = instr->Imm26Value();
- uint32_t pc_mask = ~0xfffffff;
+ uint32_t pc_mask = ~0xFFFFFFF;
uint32_t pc = ((uint32_t)(instr + 1) & pc_mask) | (imm26 << 2);
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s",
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 4d8b9966fa..795fdc4af8 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -43,8 +43,6 @@ const Register LoadDescriptor::SlotRegister() { return a0; }
const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
-const Register LoadICProtoArrayDescriptor::HandlerRegister() { return t0; }
-
const Register StoreDescriptor::ReceiverRegister() { return a1; }
const Register StoreDescriptor::NameRegister() { return a2; }
const Register StoreDescriptor::ValueRegister() { return a0; }
@@ -202,6 +200,11 @@ void TransitionElementsKindDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
+void AbortJSDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index de5de02f09..5c89467cd8 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -296,7 +296,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
lw(scratch, MemOperand(address));
- Assert(eq, kWrongAddressOrValuePassedToRecordWrite, scratch,
+ Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch,
Operand(value));
}
@@ -825,7 +825,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
rotrv(rd, rs, rt.rm());
} else {
- rotr(rd, rs, rt.immediate() & 0x1f);
+ rotr(rd, rs, rt.immediate() & 0x1F);
}
} else {
if (rt.is_reg()) {
@@ -841,8 +841,8 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
} else {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- srl(scratch, rs, rt.immediate() & 0x1f);
- sll(rd, rs, (0x20 - (rt.immediate() & 0x1f)) & 0x1f);
+ srl(scratch, rs, rt.immediate() & 0x1F);
+ sll(rd, rs, (0x20 - (rt.immediate() & 0x1F)) & 0x1F);
or_(rd, rd, scratch);
}
}
@@ -3763,9 +3763,11 @@ void MacroAssembler::MaybeDropFrames() {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ Push(Smi::kZero); // Padding.
+
// Link the current handler as the next handler.
li(t2,
Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
@@ -3898,7 +3900,8 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
}
if (FLAG_debug_code) {
- Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
+ Check(lo, AbortReason::kStackAccessBelowStackPointer, src_reg,
+ Operand(dst_reg));
}
// Restore caller's frame pointer and return address now as they will be
@@ -4491,13 +4494,13 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
// -----------------------------------------------------------------------------
// Debugging.
-void TurboAssembler::Assert(Condition cc, BailoutReason reason, Register rs,
+void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
if (emit_debug_code())
Check(cc, reason, rs, rt);
}
-void TurboAssembler::Check(Condition cc, BailoutReason reason, Register rs,
+void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
Operand rt) {
Label L;
Branch(&L, cc, rs, rt);
@@ -4506,11 +4509,11 @@ void TurboAssembler::Check(Condition cc, BailoutReason reason, Register rs,
bind(&L);
}
-void TurboAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
+ const char* msg = GetAbortReason(reason);
if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -4823,7 +4826,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
andi(scratch, object, kSmiTagMask);
- Check(ne, kOperandIsASmi, scratch, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
}
}
@@ -4834,7 +4837,7 @@ void MacroAssembler::AssertSmi(Register object) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
andi(scratch, object, kSmiTagMask);
- Check(eq, kOperandIsASmi, scratch, Operand(zero_reg));
+ Check(eq, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
}
}
@@ -4842,9 +4845,11 @@ void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
- Check(ne, kOperandIsASmiAndNotAFixedArray, t8, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray, t8,
+ Operand(zero_reg));
GetObjectType(object, t8, t8);
- Check(eq, kOperandIsNotAFixedArray, t8, Operand(FIXED_ARRAY_TYPE));
+ Check(eq, AbortReason::kOperandIsNotAFixedArray, t8,
+ Operand(FIXED_ARRAY_TYPE));
}
}
@@ -4852,9 +4857,11 @@ void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
- Check(ne, kOperandIsASmiAndNotAFunction, t8, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
+ Operand(zero_reg));
GetObjectType(object, t8, t8);
- Check(eq, kOperandIsNotAFunction, t8, Operand(JS_FUNCTION_TYPE));
+ Check(eq, AbortReason::kOperandIsNotAFunction, t8,
+ Operand(JS_FUNCTION_TYPE));
}
}
@@ -4863,9 +4870,11 @@ void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
- Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, t8,
+ Operand(zero_reg));
GetObjectType(object, t8, t8);
- Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
+ Check(eq, AbortReason::kOperandIsNotABoundFunction, t8,
+ Operand(JS_BOUND_FUNCTION_TYPE));
}
}
@@ -4873,7 +4882,8 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
- Check(ne, kOperandIsASmiAndNotAGeneratorObject, t8, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8,
+ Operand(zero_reg));
GetObjectType(object, t8, t8);
@@ -4885,7 +4895,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
// Check if JSAsyncGeneratorObject
Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
- Abort(kOperandIsNotAGeneratorObject);
+ Abort(AbortReason::kOperandIsNotAGeneratorObject);
bind(&done);
}
@@ -4899,7 +4909,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Branch(&done_checking, eq, object, Operand(scratch));
lw(t8, FieldMemOperand(object, HeapObject::kMapOffset));
LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
- Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch));
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell, t8, Operand(scratch));
bind(&done_checking);
}
}
@@ -5127,20 +5137,11 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
- if (IsMipsArchVariant(kMips32r6)) {
- uint32_t lui_offset, jialc_offset;
- UnpackTargetAddressUnsigned(Operand(function).immediate(), lui_offset,
- jialc_offset);
- if (MustUseReg(Operand(function).rmode())) {
- RecordRelocInfo(Operand(function).rmode(), Operand(function).immediate());
- }
- lui(t9, lui_offset);
- CallCFunctionHelper(t9, jialc_offset, num_reg_arguments,
- num_double_arguments);
- } else {
- li(t9, Operand(function));
- CallCFunctionHelper(t9, 0, num_reg_arguments, num_double_arguments);
- }
+  // The Linux/MIPS calling convention demands that register t9 contain
+  // the address of the function being called, so that
+  // position-independent code works correctly.
+ li(t9, Operand(function));
+ CallCFunctionHelper(t9, 0, num_reg_arguments, num_double_arguments);
}
void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
@@ -5197,6 +5198,11 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
function_base = t9;
}
+ if (function_offset != 0) {
+ addiu(t9, t9, function_offset);
+ function_offset = 0;
+ }
+
Call(function_base, function_offset);
int stack_passed_arguments = CalculateStackPassedWords(
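
The r6 fast path removed above split the target address into a lui upper half plus a 16-bit jialc offset; the replacement loads the whole address into t9 with li, and CallCFunctionHelper now folds any remaining 16-bit offset back in with addiu, which sign-extends its immediate. A minimal host-side sketch of that split-and-rebuild arithmetic (the names are illustrative, not V8's helpers; the only assumption is the usual lui + sign-extended-offset encoding):

// build: g++ -std=c++14 lui_offset_split.cc
#include <cassert>
#include <cstdint>

struct LuiOffset {
  uint32_t lui;    // value that lui places in the upper 16 bits
  int16_t offset;  // 16-bit immediate, sign-extended by addiu/jialc
};

LuiOffset Split(uint32_t target) {
  int16_t offset = static_cast<int16_t>(target & 0xFFFF);
  // Adding 0x8000 before shifting absorbs the carry that is needed when
  // the low half sign-extends to a negative value.
  uint32_t lui = (target + 0x8000u) >> 16;
  return {lui, offset};
}

uint32_t Rebuild(LuiOffset parts) {
  return (parts.lui << 16) + static_cast<int32_t>(parts.offset);
}

int main() {
  const uint32_t samples[] = {0x00000000u, 0x12345678u, 0x1234FFFFu,
                              0x0000F000u, 0xFFFF8000u, 0xDEADBEEFu};
  for (uint32_t addr : samples) assert(Rebuild(Split(addr)) == addr);
  return 0;
}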
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 52525ad9bc..8c70eb54a3 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -166,13 +166,13 @@ class TurboAssembler : public Assembler {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
+ void Assert(Condition cc, AbortReason reason, Register rs, Operand rt);
// Like Assert(), but always enabled.
- void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);
+ void Check(Condition cc, AbortReason reason, Register rs, Operand rt);
// Print a message to stdout and abort execution.
- void Abort(BailoutReason msg);
+ void Abort(AbortReason msg);
inline bool AllowThisStubCall(CodeStub* stub);
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 342f27666d..4994418ef5 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -60,8 +60,8 @@ class MipsDebugger {
void PrintAllRegsIncludingFPU();
private:
- // We set the breakpoint code to 0xfffff to easily recognize it.
- static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xfffff << 6;
+ // We set the breakpoint code to 0xFFFFF to easily recognize it.
+ static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xFFFFF << 6;
static const Instr kNopInstr = 0x0;
Simulator* sim_;
@@ -808,6 +808,10 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
+void Simulator::SetRedirectInstruction(Instruction* instruction) {
+ instruction->SetInstructionBits(rtCallRedirInstr);
+}
+
void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
void* start_addr, size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
@@ -878,21 +882,12 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
}
-void Simulator::Initialize(Isolate* isolate) {
- if (isolate->simulator_initialized()) return;
- isolate->set_simulator_initialized(true);
- ::v8::internal::ExternalReference::set_redirector(isolate,
- &RedirectExternalReference);
-}
-
-
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == nullptr) {
i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
- Initialize(isolate);
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
stack_ = reinterpret_cast<char*>(malloc(stack_size_));
@@ -934,101 +929,6 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
Simulator::~Simulator() { free(stack_); }
-// When the generated code calls an external reference we need to catch that in
-// the simulator. The external reference will be a function compiled for the
-// host architecture. We need to call that function instead of trying to
-// execute it with the simulator. We do that by redirecting the external
-// reference to a swi (software-interrupt) instruction that is handled by
-// the simulator. We write the original destination of the jump just at a known
-// offset from the swi instruction so the simulator knows what to call.
-class Redirection {
- public:
- Redirection(Isolate* isolate, void* external_function,
- ExternalReference::Type type)
- : external_function_(external_function),
- swi_instruction_(rtCallRedirInstr),
- type_(type),
- next_(nullptr) {
- next_ = isolate->simulator_redirection();
- Simulator::current(isolate)->
- FlushICache(isolate->simulator_i_cache(),
- reinterpret_cast<void*>(&swi_instruction_),
- Instruction::kInstrSize);
- isolate->set_simulator_redirection(this);
- }
-
- void* address_of_swi_instruction() {
- return reinterpret_cast<void*>(&swi_instruction_);
- }
-
- void* external_function() { return external_function_; }
- ExternalReference::Type type() { return type_; }
-
- static Redirection* Get(Isolate* isolate, void* external_function,
- ExternalReference::Type type) {
- Redirection* current = isolate->simulator_redirection();
- for (; current != nullptr; current = current->next_) {
- if (current->external_function_ == external_function &&
- current->type_ == type) {
- return current;
- }
- }
- return new Redirection(isolate, external_function, type);
- }
-
- static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
- char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
- char* addr_of_redirection =
- addr_of_swi - offsetof(Redirection, swi_instruction_);
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- static void* ReverseRedirection(int32_t reg) {
- Redirection* redirection = FromSwiInstruction(
- reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
- return redirection->external_function();
- }
-
- static void DeleteChain(Redirection* redirection) {
- while (redirection != nullptr) {
- Redirection* next = redirection->next_;
- delete redirection;
- redirection = next;
- }
- }
-
- private:
- void* external_function_;
- uint32_t swi_instruction_;
- ExternalReference::Type type_;
- Redirection* next_;
-};
-
-
-// static
-void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
- Redirection* first) {
- Redirection::DeleteChain(first);
- if (i_cache != nullptr) {
- for (base::CustomMatcherHashMap::Entry* entry = i_cache->Start();
- entry != nullptr; entry = i_cache->Next(entry)) {
- delete static_cast<CachePage*>(entry->value);
- }
- delete i_cache;
- }
-}
-
-
-void* Simulator::RedirectExternalReference(Isolate* isolate,
- void* external_function,
- ExternalReference::Type type) {
- base::LockGuard<base::Mutex> lock_guard(
- isolate->simulator_redirection_mutex());
- Redirection* redirection = Redirection::Get(isolate, external_function, type);
- return redirection->address_of_swi_instruction();
-}
-
-
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
@@ -1105,7 +1005,7 @@ void Simulator::set_fpu_register_double(int fpureg, double value) {
} else {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
int64_t i64 = bit_cast<int64_t>(value);
- set_fpu_register_word(fpureg, i64 & 0xffffffff);
+ set_fpu_register_word(fpureg, i64 & 0xFFFFFFFF);
set_fpu_register_word(fpureg + 1, i64 >> 32);
}
}
@@ -1152,19 +1052,19 @@ int64_t Simulator::get_fpu_register(int fpureg) const {
int32_t Simulator::get_fpu_register_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xffffffff);
+ return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xFFFFFFFF);
}
int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xffffffff);
+ return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xFFFFFFFF);
}
int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return static_cast<int32_t>((FPUregisters_[fpureg * 2] >> 32) & 0xffffffff);
+ return static_cast<int32_t>((FPUregisters_[fpureg * 2] >> 32) & 0xFFFFFFFF);
}
@@ -2204,7 +2104,7 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
uint32_t Simulator::ReadBU(int32_t addr) {
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
TraceMemRd(addr, static_cast<int32_t>(*ptr));
- return *ptr & 0xff;
+ return *ptr & 0xFF;
}
@@ -2318,7 +2218,7 @@ void Simulator::SoftwareInterrupt() {
// We first check if we met a call_rt_redirected.
if (instr_.InstructionBits() == rtCallRedirInstr) {
- Redirection* redirection = Redirection::FromSwiInstruction(instr_.instr());
+ Redirection* redirection = Redirection::FromInstruction(instr_.instr());
int32_t arg0 = get_register(a0);
int32_t arg1 = get_register(a1);
int32_t arg2 = get_register(a2);
@@ -2603,7 +2503,7 @@ void Simulator::DisableStop(uint32_t code) {
void Simulator::IncreaseStopCounter(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
- if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
PrintF("Stop counter for code %i has overflowed.\n"
"Enabling this code and reseting the counter to 0.\n", code);
watched_stops_[code].count = 0;
@@ -3102,8 +3002,8 @@ void Simulator::DecodeTypeRegisterDRsType() {
// Extracting sign, exponent and mantissa from the input double
uint32_t sign = (classed >> 63) & 1;
- uint32_t exponent = (classed >> 52) & 0x00000000000007ff;
- uint64_t mantissa = classed & 0x000fffffffffffff;
+ uint32_t exponent = (classed >> 52) & 0x00000000000007FF;
+ uint64_t mantissa = classed & 0x000FFFFFFFFFFFFF;
uint64_t result;
double dResult;
@@ -3124,7 +3024,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
// Setting flags if double is NaN
signalingNan = false;
quietNan = false;
- if (!negInf && !posInf && exponent == 0x7ff) {
+ if (!negInf && !posInf && exponent == 0x7FF) {
quietNan = ((mantissa & 0x0008000000000000) != 0) &&
((mantissa & (0x0008000000000000 - 1)) == 0);
signalingNan = !quietNan;
@@ -3417,8 +3317,8 @@ void Simulator::DecodeTypeRegisterSRsType() {
// Extracting sign, exponent and mantissa from the input float
uint32_t sign = (classed >> 31) & 1;
- uint32_t exponent = (classed >> 23) & 0x000000ff;
- uint32_t mantissa = classed & 0x007fffff;
+ uint32_t exponent = (classed >> 23) & 0x000000FF;
+ uint32_t mantissa = classed & 0x007FFFFF;
uint32_t result;
float fResult;
@@ -3439,7 +3339,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
// Setting flags if float is NaN
signalingNan = false;
quietNan = false;
- if (!negInf && !posInf && (exponent == 0xff)) {
+ if (!negInf && !posInf && (exponent == 0xFF)) {
quietNan = ((mantissa & 0x00200000) == 0) &&
((mantissa & (0x00200000 - 1)) == 0);
signalingNan = !quietNan;
@@ -3994,12 +3894,12 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
case MULT:
i64hilo = static_cast<int64_t>(rs()) * static_cast<int64_t>(rt());
if (!IsMipsArchVariant(kMips32r6)) {
- set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
+ set_register(LO, static_cast<int32_t>(i64hilo & 0xFFFFFFFF));
set_register(HI, static_cast<int32_t>(i64hilo >> 32));
} else {
switch (sa()) {
case MUL_OP:
- SetResult(rd_reg(), static_cast<int32_t>(i64hilo & 0xffffffff));
+ SetResult(rd_reg(), static_cast<int32_t>(i64hilo & 0xFFFFFFFF));
break;
case MUH_OP:
SetResult(rd_reg(), static_cast<int32_t>(i64hilo >> 32));
@@ -4013,12 +3913,12 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
case MULTU:
u64hilo = static_cast<uint64_t>(rs_u()) * static_cast<uint64_t>(rt_u());
if (!IsMipsArchVariant(kMips32r6)) {
- set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+ set_register(LO, static_cast<int32_t>(u64hilo & 0xFFFFFFFF));
set_register(HI, static_cast<int32_t>(u64hilo >> 32));
} else {
switch (sa()) {
case MUL_OP:
- SetResult(rd_reg(), static_cast<int32_t>(u64hilo & 0xffffffff));
+ SetResult(rd_reg(), static_cast<int32_t>(u64hilo & 0xFFFFFFFF));
break;
case MUH_OP:
SetResult(rd_reg(), static_cast<int32_t>(u64hilo >> 32));
@@ -4265,7 +4165,7 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
// Reverse the bit in byte for each individual byte
for (int i = 0; i < 4; i++) {
output = output >> 8;
- i_byte = input & 0xff;
+ i_byte = input & 0xFF;
// Fast way to reverse bits in byte
// Devised by Sean Anderson, July 13, 2001
@@ -5258,8 +5158,8 @@ void Msa3RInstrHelper_shuffle(const uint32_t opcode, T_reg ws, T_reg wt,
wd_p[2 * i + 1] = ws_p[2 * i + 1];
break;
case VSHF: {
- const int mask_not_valid = 0xc0;
- const int mask_6_bits = 0x3f;
+ const int mask_not_valid = 0xC0;
+ const int mask_6_bits = 0x3F;
if ((wd_p[i] & mask_not_valid)) {
wd_p[i] = 0;
} else {
@@ -5658,7 +5558,7 @@ void Simulator::DecodeTypeMsa3RF() {
break; \
} \
/* Infinity */ \
- dst = PACK_FLOAT16(aSign, 0x1f, 0); \
+ dst = PACK_FLOAT16(aSign, 0x1F, 0); \
break; \
} else if (aExp == 0 && aFrac == 0) { \
dst = PACK_FLOAT16(aSign, 0, 0); \
@@ -5672,13 +5572,13 @@ void Simulator::DecodeTypeMsa3RF() {
aExp -= 0x71; \
if (aExp < 1) { \
/* Will be denormal in halfprec */ \
- mask = 0x00ffffff; \
+ mask = 0x00FFFFFF; \
if (aExp >= -11) { \
mask >>= 11 + aExp; \
} \
} else { \
/* Normal number in halfprec */ \
- mask = 0x00001fff; \
+ mask = 0x00001FFF; \
} \
switch (MSACSR_ & 3) { \
case kRoundToNearest: \
@@ -5699,7 +5599,7 @@ void Simulator::DecodeTypeMsa3RF() {
} \
rounding_bumps_exp = (aFrac + increment >= 0x01000000); \
if (aExp > maxexp || (aExp == maxexp && rounding_bumps_exp)) { \
- dst = PACK_FLOAT16(aSign, 0x1f, 0); \
+ dst = PACK_FLOAT16(aSign, 0x1F, 0); \
break; \
} \
aFrac += increment; \
@@ -6213,8 +6113,8 @@ template <typename T_int, typename T_fp, typename T_reg>
T_int Msa2RFInstrHelper2(uint32_t opcode, T_reg ws, int i) {
switch (opcode) {
#define EXTRACT_FLOAT16_SIGN(fp16) (fp16 >> 15)
-#define EXTRACT_FLOAT16_EXP(fp16) (fp16 >> 10 & 0x1f)
-#define EXTRACT_FLOAT16_FRAC(fp16) (fp16 & 0x3ff)
+#define EXTRACT_FLOAT16_EXP(fp16) (fp16 >> 10 & 0x1F)
+#define EXTRACT_FLOAT16_FRAC(fp16) (fp16 & 0x3FF)
#define PACK_FLOAT32(sign, exp, frac) \
static_cast<uint32_t>(((sign) << 31) + ((exp) << 23) + (frac))
#define FEXUP_DF(src_index) \
@@ -6224,9 +6124,9 @@ T_int Msa2RFInstrHelper2(uint32_t opcode, T_reg ws, int i) {
aSign = EXTRACT_FLOAT16_SIGN(element); \
aExp = EXTRACT_FLOAT16_EXP(element); \
aFrac = EXTRACT_FLOAT16_FRAC(element); \
- if (V8_LIKELY(aExp && aExp != 0x1f)) { \
+ if (V8_LIKELY(aExp && aExp != 0x1F)) { \
return PACK_FLOAT32(aSign, aExp + 0x70, aFrac << 13); \
- } else if (aExp == 0x1f) { \
+ } else if (aExp == 0x1F) { \
if (aFrac) { \
return bit_cast<int32_t>(std::numeric_limits<float>::quiet_NaN()); \
} else { \
@@ -6389,7 +6289,7 @@ void Simulator::DecodeTypeImmediate() {
int32_t ft_reg = instr_.FtValue(); // Destination register.
// Zero extended immediate.
- uint32_t oe_imm16 = 0xffff & imm16;
+ uint32_t oe_imm16 = 0xFFFF & imm16;
// Sign extended immediate.
int32_t se_imm16 = imm16;
@@ -6438,11 +6338,11 @@ void Simulator::DecodeTypeImmediate() {
const int32_t bitsIn16Int = sizeof(int16_t) * kBitsPerByte;
if (do_branch) {
if (FLAG_debug_code) {
- int16_t bits = imm16 & 0xfc;
+ int16_t bits = imm16 & 0xFC;
if (imm16 >= 0) {
CHECK_EQ(bits, 0);
} else {
- CHECK_EQ(bits ^ 0xfc, 0);
+ CHECK_EQ(bits ^ 0xFC, 0);
}
}
// jump range :[pc + kInstrSize - 512 * kInstrSize,
@@ -6899,7 +6799,7 @@ void Simulator::DecodeTypeImmediate() {
break;
}
case ADDIUPC: {
- int32_t se_imm19 = imm19 | ((imm19 & 0x40000) ? 0xfff80000 : 0);
+ int32_t se_imm19 = imm19 | ((imm19 & 0x40000) ? 0xFFF80000 : 0);
alu_out = current_pc + (se_imm19 << 2);
break;
}
@@ -6987,7 +6887,7 @@ void Simulator::DecodeTypeJump() {
// Get current pc.
int32_t current_pc = get_pc();
// Get unchanged bits of pc.
- int32_t pc_high_bits = current_pc & 0xf0000000;
+ int32_t pc_high_bits = current_pc & 0xF0000000;
// Next pc.
int32_t next_pc = pc_high_bits | (simInstr.Imm26Value() << 2);
@@ -7150,18 +7050,16 @@ void Simulator::CallInternal(byte* entry) {
set_register(fp, fp_val);
}
-
-int32_t Simulator::Call(byte* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
+intptr_t Simulator::CallImpl(byte* entry, int argument_count,
+ const intptr_t* arguments) {
// Set up arguments.
// First four arguments passed in registers.
- DCHECK_GE(argument_count, 4);
- set_register(a0, va_arg(parameters, int32_t));
- set_register(a1, va_arg(parameters, int32_t));
- set_register(a2, va_arg(parameters, int32_t));
- set_register(a3, va_arg(parameters, int32_t));
+ int reg_arg_count = std::min(4, argument_count);
+ if (reg_arg_count > 0) set_register(a0, arguments[0]);
+ if (reg_arg_count > 1) set_register(a1, arguments[1]);
+ if (reg_arg_count > 2) set_register(a2, arguments[2]);
+ if (reg_arg_count > 3) set_register(a3, arguments[3]);
// Remaining arguments passed on stack.
int original_stack = get_register(sp);
@@ -7173,10 +7071,8 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
}
// Store remaining arguments on stack, from low to high memory.
intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = 4; i < argument_count; i++) {
- stack_argument[i - 4 + kCArgSlotCount] = va_arg(parameters, int32_t);
- }
- va_end(parameters);
+ memcpy(stack_argument + kCArgSlotCount, arguments + reg_arg_count,
+ (argument_count - reg_arg_count) * sizeof(*arguments));
set_register(sp, entry_stack);
CallInternal(entry);
@@ -7185,8 +7081,7 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
CHECK_EQ(entry_stack, get_register(sp));
set_register(sp, original_stack);
- int32_t result = get_register(v0);
- return result;
+ return get_register(v0);
}
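
The rewritten CallImpl above replaces the old va_list walk: the first four words now go straight into a0-a3 and everything else is memcpy'd onto the simulated stack just past the O32 argument slots. A small host-side sketch of that register/stack split (plain C++, with a stand-in for kCArgSlotCount; it only models the marshalling, not the simulator state):

// build: g++ -std=c++14 o32_marshal_sketch.cc
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <vector>

constexpr int kCArgSlotCount = 4;  // O32 reserves 4 home slots for a0-a3.

struct MarshalResult {
  intptr_t regs[4] = {0, 0, 0, 0};    // a0..a3
  std::vector<intptr_t> stack_slots;  // words at and above the new sp
};

MarshalResult Marshal(const intptr_t* arguments, int argument_count) {
  MarshalResult out;
  int reg_arg_count = std::min(4, argument_count);
  for (int i = 0; i < reg_arg_count; i++) out.regs[i] = arguments[i];
  // The home slots stay untouched in the real code; zero them here so the
  // resulting layout is easy to inspect.
  out.stack_slots.assign(kCArgSlotCount + (argument_count - reg_arg_count), 0);
  std::memcpy(out.stack_slots.data() + kCArgSlotCount,
              arguments + reg_arg_count,
              (argument_count - reg_arg_count) * sizeof(intptr_t));
  return out;
}

int main() {
  const intptr_t args[] = {10, 11, 12, 13, 14, 15};
  MarshalResult r = Marshal(args, 6);
  // a0..a3 hold 10..13; stack slots 4 and 5 hold 14 and 15.
  return !(r.regs[3] == 13 && r.stack_slots[4] == 14 && r.stack_slots[5] == 15);
}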
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index fbc4ad19fb..0c417becd5 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
// Declares a Simulator for MIPS instructions if we are not generating a native
// MIPS binary. This Simulator allows us to run and debug MIPS code generation
// on regular desktop machines.
-// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// V8 calls into generated code via the GeneratedCode wrapper,
 // which will start execution in the Simulator or forward to the real entry
// on a MIPS HW platform.
@@ -16,63 +15,12 @@
#include "src/allocation.h"
#include "src/mips/constants-mips.h"
-#if !defined(USE_SIMULATOR)
-// Running without a simulator on a native mips platform.
-
-namespace v8 {
-namespace internal {
-
-// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- entry(p0, p1, p2, p3, p4)
-
-typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*, int*,
- int, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type arm_regexp_matcher.
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
- p8))
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on mips uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
- uintptr_t c_limit) {
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
- uintptr_t try_catch_address) {
- USE(isolate);
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch(Isolate* isolate) { USE(isolate); }
-};
-
-} // namespace internal
-} // namespace v8
-
-// Calculated the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-// NOTE: The check for overflow is not safe as there is no guarantee that the
-// running thread has its stack in all memory up to address 0x00000000.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
- (reinterpret_cast<uintptr_t>(this) >= limit ? \
- reinterpret_cast<uintptr_t>(this) - limit : 0)
-
-#else // !defined(USE_SIMULATOR)
+#if defined(USE_SIMULATOR)
// Running with a simulator.
#include "src/assembler.h"
#include "src/base/hashmap.h"
+#include "src/simulator-base.h"
namespace v8 {
namespace internal {
@@ -143,7 +91,7 @@ class SimInstruction : public InstructionGetters<SimInstructionBase> {
}
};
-class Simulator {
+class Simulator : public SimulatorBase {
public:
friend class MipsDebugger;
@@ -223,7 +171,7 @@ class Simulator {
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
- static Simulator* current(v8::internal::Isolate* isolate);
+ V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);
// Accessors for register state. Reading the pc value adheres to the MIPS
 // architecture specification and is off by 8 from the currently executing
@@ -288,15 +236,11 @@ class Simulator {
// Executes MIPS instructions until the PC reaches end_sim_pc.
void Execute();
- // Call on program start.
- static void Initialize(Isolate* isolate);
-
- static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
+ template <typename Return, typename... Args>
+ Return Call(byte* entry, Args... args) {
+ return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
+ }
- // V8 generally calls into generated JS code with 5 parameters and into
- // generated RegExp code with 7 parameters. This is a convenience function,
- // which sets up the simulator state and grabs the result on return.
- int32_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
double CallFP(byte* entry, double d0, double d1);
@@ -310,6 +254,9 @@ class Simulator {
void set_last_debugger_input(char* input);
char* last_debugger_input() { return last_debugger_input_; }
+ // Redirection support.
+ static void SetRedirectInstruction(Instruction* instruction);
+
// ICache checking.
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -332,6 +279,9 @@ class Simulator {
Unpredictable = 0xbadbeaf
};
+ V8_EXPORT_PRIVATE intptr_t CallImpl(byte* entry, int argument_count,
+ const intptr_t* arguments);
+
// Unsupported instructions use Format to print an error and stop execution.
void Format(Instruction* instr, const char* format);
@@ -557,11 +507,6 @@ class Simulator {
// Exceptions.
void SignalException(Exception e);
- // Runtime call support. Uses the isolate in a thread-safe way.
- static void* RedirectExternalReference(Isolate* isolate,
- void* external_function,
- ExternalReference::Type type);
-
// Handle arguments and return value for runtime FP functions.
void GetFpArgs(double* x, double* y, int32_t* z);
void SetFpResult(const double& result);
@@ -616,42 +561,8 @@ class Simulator {
StopCountAndDesc watched_stops_[kMaxStopCode + 1];
};
-
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
- FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- Simulator::current(isolate)->Call(entry, 9, p0, p1, p2, p3, p4, p5, p6, p7, \
- p8)
-
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. The JS-based limit normally points near the end of
-// the simulator stack. When the C-based limit is exhausted we reflect that by
-// lowering the JS-based limit as well, to make stack checks trigger.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
- uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit(c_limit);
- }
-
- static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
- uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(isolate);
- return sim->PushAddress(try_catch_address);
- }
-
- static inline void UnregisterCTryCatch(Isolate* isolate) {
- Simulator::current(isolate)->PopAddress();
- }
-};
-
} // namespace internal
} // namespace v8
-#endif // !defined(USE_SIMULATOR)
+#endif // defined(USE_SIMULATOR)
#endif // V8_MIPS_SIMULATOR_MIPS_H_
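
The Call template added to this header packs its typed arguments into a flat word array via VariadicCall (from src/simulator-base.h) before dispatching to CallImpl. The packing pattern itself is simple; a self-contained sketch follows (the helper names here are illustrative, and the real version also threads the entry pointer and the Simulator instance through):

// build: g++ -std=c++14 variadic_call_sketch.cc
#include <array>
#include <cstdint>
#include <cstdio>

// Stand-in for Simulator::CallImpl: just sums the packed words so the
// round trip is observable.
intptr_t CallImpl(int argument_count, const intptr_t* arguments) {
  intptr_t sum = 0;
  for (int i = 0; i < argument_count; i++) sum += arguments[i];
  return sum;
}

// Convert each argument to the machine-word representation of the ABI.
intptr_t ConvertArg(intptr_t x) { return x; }
template <typename T>
intptr_t ConvertArg(T* p) { return reinterpret_cast<intptr_t>(p); }

template <typename Return, typename... Args>
Return VariadicCallSketch(Args... args) {
  // Pack the typed arguments into a flat word array, then dispatch once.
  std::array<intptr_t, sizeof...(Args)> packed{{ConvertArg(args)...}};
  intptr_t result = CallImpl(static_cast<int>(packed.size()), packed.data());
  return static_cast<Return>(result);
}

int main() {
  int32_t r = VariadicCallSketch<int32_t>(intptr_t{1}, intptr_t{2}, intptr_t{3});
  std::printf("sum = %d\n", static_cast<int>(r));  // prints: sum = 6
  return 0;
}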
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index 2cb3374f8e..ded3da224c 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -77,8 +77,8 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
+ return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
@@ -116,18 +116,6 @@ int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
}
-Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- return target_address_at(pc, constant_pool);
-}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
-}
-
Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
}
@@ -136,7 +124,7 @@ void Assembler::deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code, Address target) {
set_target_address_at(
isolate, instruction_payload - kInstructionsFor64BitConstant * kInstrSize,
- code, target);
+ code ? code->constant_pool() : nullptr, target);
}
void Assembler::set_target_internal_reference_encoded_at(Address pc,
@@ -170,21 +158,21 @@ void Assembler::deserialization_set_target_internal_reference_at(
HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(
- reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
+ return HeapObject::cast(reinterpret_cast<Object*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<HeapObject>(
- reinterpret_cast<HeapObject**>(Assembler::target_address_at(pc_, host_)));
+ return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
+ Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
@@ -198,7 +186,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -246,7 +234,7 @@ void RelocInfo::WipeOut(Isolate* isolate) {
} else if (IsInternalReferenceEncoded(rmode_)) {
Assembler::set_target_internal_reference_encoded_at(pc_, nullptr);
} else {
- Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
}
}
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index 5099ec1db9..a056f66849 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -170,22 +170,23 @@ bool RelocInfo::IsInConstantPool() {
}
Address RelocInfo::embedded_address() const {
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
uint32_t RelocInfo::embedded_size() const {
- return static_cast<uint32_t>(
- reinterpret_cast<intptr_t>((Assembler::target_address_at(pc_, host_))));
+ return static_cast<uint32_t>(reinterpret_cast<intptr_t>(
+ (Assembler::target_address_at(pc_, constant_pool_))));
}
void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
+ flush_mode);
}
void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
@@ -287,7 +288,7 @@ const Instr kSwRegFpNegOffsetPattern =
SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
-const Instr kLwSwInstrTypeMask = 0xffe00000;
+const Instr kLwSwInstrTypeMask = 0xFFE00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
@@ -2159,7 +2160,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
// about -64KB to about +64KB, allowing further addition of 4 when accessing
// 64-bit variables with two 32-bit accesses.
constexpr int32_t kMinOffsetForSimpleAdjustment =
- 0x7ff8; // Max int16_t that's a multiple of 8.
+ 0x7FF8; // Max int16_t that's a multiple of 8.
constexpr int32_t kMaxOffsetForSimpleAdjustment =
2 * kMinOffsetForSimpleAdjustment;
@@ -2486,7 +2487,7 @@ void Assembler::aluipc(Register rs, int16_t imm16) {
// Break / Trap instructions.
void Assembler::break_(uint32_t code, bool break_as_stop) {
- DCHECK_EQ(code & ~0xfffff, 0);
+ DCHECK_EQ(code & ~0xFFFFF, 0);
// We need to invalidate breaks that could be stops as well because the
// simulator expects a char pointer after the stop instruction.
// See constants-mips.h for explanation.
@@ -2896,7 +2897,7 @@ void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
memcpy(&i, &d, 8);
- *lo = i & 0xffffffff;
+ *lo = i & 0xFFFFFFFF;
*hi = i >> 32;
}
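
DoubleAsTwoUInt32 above hands the IEEE-754 bit pattern of a double to the two 32-bit FPU register halves. A quick host-side round-trip check of that split, using only standard C++:

// build: g++ -std=c++14 double_split_check.cc
#include <cassert>
#include <cstdint>
#include <cstring>

void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t i;
  std::memcpy(&i, &d, 8);
  *lo = i & 0xFFFFFFFF;
  *hi = i >> 32;
}

double TwoUInt32AsDouble(uint32_t lo, uint32_t hi) {
  uint64_t i = (static_cast<uint64_t>(hi) << 32) | lo;
  double d;
  std::memcpy(&d, &i, 8);
  return d;
}

int main() {
  const double samples[] = {0.0, -0.0, 1.5, -2.25, 6.02214076e23};
  for (double d : samples) {
    uint32_t lo, hi;
    DoubleAsTwoUInt32(d, &lo, &hi);
    double back = TwoUInt32AsDouble(lo, hi);
    // Compare bit patterns so that -0.0 round-trips exactly.
    assert(std::memcmp(&back, &d, sizeof(d)) == 0);
  }
  return 0;
}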
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index cdb8be46cd..3530c7e7b2 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -238,6 +238,7 @@ int ToNumber(Register reg);
Register ToRegister(int num);
+constexpr bool kPadArguments = false;
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
@@ -590,10 +591,6 @@ class Assembler : public AssemblerBase {
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
set_target_address_at(isolate, pc, target, icache_flush_mode);
}
- INLINE(static Address target_address_at(Address pc, Code* code));
- INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
static void set_target_value_at(
Isolate* isolate, Address pc, uint64_t target,
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 5d8cee7787..f8075885a9 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -237,7 +237,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// (happens only when input is MIN_INT).
__ Branch(&bail_out, gt, zero_reg, Operand(scratch));
__ bind(&positive_exponent);
- __ Assert(ge, kUnexpectedNegativeValue, scratch, Operand(zero_reg));
+ __ Assert(ge, AbortReason::kUnexpectedNegativeValue, scratch,
+ Operand(zero_reg));
Label while_true, no_carry, loop_end;
__ bind(&while_true);
@@ -525,7 +526,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// caller fp |
// function slot | entry frame
// context slot |
- // bad fp (0xff...f) |
+ // bad fp (0xFF...F) |
// callee saved registers + ra
// [ O32: 4 args slots]
// args
@@ -587,13 +588,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// callee saved registers + ra
// [ O32: 4 args slots]
// args
-
- if (type() == StackFrame::CONSTRUCT_ENTRY) {
- __ Call(BUILTIN_CODE(isolate, JSConstructEntryTrampoline),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(BUILTIN_CODE(isolate, JSEntryTrampoline), RelocInfo::CODE_TARGET);
- }
+ __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@@ -644,8 +639,8 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
// filled with kZapValue by the GC.
// Dereference the address and check for this.
__ Uld(a4, MemOperand(t9));
- __ Assert(ne, kReceivedInvalidReturnAddress, a4,
- Operand(reinterpret_cast<uint64_t>(kZapValue)));
+ __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
+ Operand(reinterpret_cast<uint64_t>(kZapValue)));
}
__ Jump(t9);
}
@@ -760,7 +755,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -802,7 +797,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
if (FLAG_debug_code) {
__ Ld(a5, FieldMemOperand(a2, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, kExpectedAllocationSite, a5, Operand(at));
+ __ Assert(eq, AbortReason::kExpectedAllocationSite, a5, Operand(at));
}
// Save the resulting elements kind in type info. We can't just store a3
@@ -825,7 +820,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -900,11 +895,11 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ Ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ SmiTst(a4, at);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
- at, Operand(zero_reg));
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at,
+ Operand(zero_reg));
__ GetObjectType(a4, a4, a5);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
- a5, Operand(MAP_TYPE));
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, a5,
+ Operand(MAP_TYPE));
// We should either have undefined in a2 or a valid AllocationSite
__ AssertUndefinedOrAllocationSite(a2, a4);
@@ -982,11 +977,11 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ SmiTst(a3, at);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
- at, Operand(zero_reg));
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at,
+ Operand(zero_reg));
__ GetObjectType(a3, a3, a4);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
- a4, Operand(MAP_TYPE));
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, a4,
+ Operand(MAP_TYPE));
}
// Figure out the right elements kind.
@@ -1001,8 +996,10 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label done;
__ Branch(&done, eq, a3, Operand(PACKED_ELEMENTS));
- __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray, a3,
- Operand(HOLEY_ELEMENTS));
+ __ Assert(
+ eq,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray,
+ a3, Operand(HOLEY_ELEMENTS));
__ bind(&done);
}
@@ -1104,7 +1101,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ Sd(s0, MemOperand(s3, kNextOffset));
if (__ emit_debug_code()) {
__ Lw(a1, MemOperand(s3, kLevelOffset));
- __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
+ Operand(s2));
}
__ Subu(s2, s2, Operand(1));
__ Sw(s2, MemOperand(s3, kLevelOffset));
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index 970e0efe56..3be5e504bb 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -24,8 +24,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
#else
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -97,7 +96,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// copied and a3 to the dst pointer after all the 64 byte chunks have been
// copied. We will loop, incrementing a0 and a1 until a0 equals a3.
__ bind(&aligned);
- __ andi(t8, a2, 0x3f);
+ __ andi(t8, a2, 0x3F);
__ beq(a2, t8, &chkw); // Less than 64?
__ subu(a3, a2, t8); // In delay slot.
__ addu(a3, a0, a3); // Now a3 is the final dst after loop.
@@ -180,7 +179,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// down to chk1w to handle the tail end of the copy.
__ bind(&chkw);
__ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
- __ andi(t8, a2, 0x1f);
+ __ andi(t8, a2, 0x1F);
__ beq(a2, t8, &chk1w); // Less than 32?
__ nop(); // In delay slot.
__ Lw(a4, MemOperand(a1));
@@ -264,7 +263,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// the dst pointer after all the 64 byte chunks have been copied. We will
// loop, incrementing a0 and a1 until a0 equals a3.
__ bind(&ua_chk16w);
- __ andi(t8, a2, 0x3f);
+ __ andi(t8, a2, 0x3F);
__ beq(a2, t8, &ua_chkw);
__ subu(a3, a2, t8); // In delay slot.
__ addu(a3, a0, a3);
@@ -437,7 +436,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// ua_chk1w to handle the tail end of the copy.
__ bind(&ua_chkw);
__ Pref(pref_hint_load, MemOperand(a1));
- __ andi(t8, a2, 0x1f);
+ __ andi(t8, a2, 0x1F);
__ beq(a2, t8, &ua_chk1w);
__ nop(); // In delay slot.
@@ -546,8 +545,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@@ -558,8 +556,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -575,8 +572,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
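
The MemCopyUint8 stub in this file peels the length into 64-byte chunks (mask 0x3F), then 32-byte and word-sized tails (mask 0x1F and below); the andi/beq pairs compute the leftover byte count and skip a loop when nothing is left for it. The same control flow in scalar C++, as a readable stand-in for the assembled loop:

// build: g++ -std=c++14 chunked_copy_sketch.cc
#include <cassert>
#include <cstdint>
#include <cstring>

void CopyChunked(uint8_t* dst, const uint8_t* src, size_t size) {
  size_t tail = size & 0x3F;  // bytes left over after whole 64-byte chunks
  const uint8_t* chunk_end = src + (size - tail);
  while (src != chunk_end) {  // the "aligned" chunk loop in the stub
    std::memcpy(dst, src, 64);
    src += 64;
    dst += 64;
  }
  std::memcpy(dst, src, tail);  // the chkw/chk1w tail handling
}

int main() {
  uint8_t src[200], dst[200] = {0};
  for (int i = 0; i < 200; i++) src[i] = static_cast<uint8_t>(i);
  CopyChunked(dst, src, 200);  // 3 chunks of 64 bytes plus an 8-byte tail
  assert(std::memcmp(dst, src, 200) == 0);
  return 0;
}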
diff --git a/deps/v8/src/mips64/disasm-mips64.cc b/deps/v8/src/mips64/disasm-mips64.cc
index 523e268532..d53b47d0c6 100644
--- a/deps/v8/src/mips64/disasm-mips64.cc
+++ b/deps/v8/src/mips64/disasm-mips64.cc
@@ -449,7 +449,7 @@ void Decoder::PrintPCImm21(Instruction* instr, int delta_pc, int n_bits) {
void Decoder::PrintXImm26(Instruction* instr) {
uint64_t target = static_cast<uint64_t>(instr->Imm26Value())
<< kImmFieldShift;
- target = (reinterpret_cast<uint64_t>(instr) & ~0xfffffff) | target;
+ target = (reinterpret_cast<uint64_t>(instr) & ~0xFFFFFFF) | target;
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "0x%" PRIx64, target);
}
@@ -485,7 +485,7 @@ void Decoder::PrintPCImm26(Instruction* instr, int delta_pc, int n_bits) {
// PC[GPRLEN-1 .. 28] || instr_index26 || 00
void Decoder::PrintPCImm26(Instruction* instr) {
int32_t imm26 = instr->Imm26Value();
- uint64_t pc_mask = ~0xfffffff;
+ uint64_t pc_mask = ~0xFFFFFFF;
uint64_t pc = ((uint64_t)(instr + 1) & pc_mask) | (imm26 << 2);
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s",
@@ -2225,6 +2225,12 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
case SWR:
Format(instr, "swr 'rt, 'imm16s('rs)");
break;
+ case SDR:
+ Format(instr, "sdr 'rt, 'imm16s('rs)");
+ break;
+ case SDL:
+ Format(instr, "sdl 'rt, 'imm16s('rs)");
+ break;
case LL:
if (kArchVariant == kMips64r6) {
Unknown(instr);
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index e55a0c57ed..8bc04a0401 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -43,8 +43,6 @@ const Register LoadDescriptor::SlotRegister() { return a0; }
const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
-const Register LoadICProtoArrayDescriptor::HandlerRegister() { return a4; }
-
const Register StoreDescriptor::ReceiverRegister() { return a1; }
const Register StoreDescriptor::NameRegister() { return a2; }
const Register StoreDescriptor::ValueRegister() { return a0; }
@@ -202,6 +200,11 @@ void TransitionElementsKindDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
+void AbortJSDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 25bc8baf80..841f4665cf 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -296,7 +296,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Ld(scratch, MemOperand(address));
- Assert(eq, kWrongAddressOrValuePassedToRecordWrite, scratch,
+ Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch,
Operand(value));
}
@@ -1537,14 +1537,14 @@ int TurboAssembler::InstrCountForLi64Bit(int64_t value) {
kArchVariant == kMips64r6) {
return 2;
} else if ((value & kImm16Mask) == 0 &&
- ((value >> 31) & 0x1ffff) == ((0x20000 - bit31) & 0x1ffff) &&
+ ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF) &&
kArchVariant == kMips64r6) {
return 2;
} else if (is_int16(static_cast<int32_t>(value)) &&
is_int16((value >> 32) + bit31) && kArchVariant == kMips64r6) {
return 2;
} else if (is_int16(static_cast<int32_t>(value)) &&
- ((value >> 31) & 0x1ffff) == ((0x20000 - bit31) & 0x1ffff) &&
+ ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF) &&
kArchVariant == kMips64r6) {
return 2;
} else if (base::bits::IsPowerOfTwo(value + 1) ||
@@ -1649,8 +1649,8 @@ void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
lui(rd, j.immediate() >> kLuiShift & kImm16Mask);
dahi(rd, ((j.immediate() >> 32) + bit31) & kImm16Mask);
} else if ((j.immediate() & kImm16Mask) == 0 &&
- ((j.immediate() >> 31) & 0x1ffff) ==
- ((0x20000 - bit31) & 0x1ffff) &&
+ ((j.immediate() >> 31) & 0x1FFFF) ==
+ ((0x20000 - bit31) & 0x1FFFF) &&
kArchVariant == kMips64r6) {
// 16 LSBs all set to zero.
// 48 MSBs hold a signed value which can't be represented by signed
@@ -1665,8 +1665,8 @@ void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
daddiu(rd, zero_reg, j.immediate() & kImm16Mask);
dahi(rd, ((j.immediate() >> 32) + bit31) & kImm16Mask);
} else if (is_int16(static_cast<int32_t>(j.immediate())) &&
- ((j.immediate() >> 31) & 0x1ffff) ==
- ((0x20000 - bit31) & 0x1ffff) &&
+ ((j.immediate() >> 31) & 0x1FFFF) ==
+ ((0x20000 - bit31) & 0x1FFFF) &&
kArchVariant == kMips64r6) {
// 48 LSBs contain an unsigned 16-bit number.
// 16 MSBs contain a signed 16-bit number.
@@ -2163,7 +2163,7 @@ void MacroAssembler::Trunc_l_ud(FPURegister fd,
{
UseScratchRegisterScope temps(this);
Register scratch1 = temps.Acquire();
- li(scratch1, 0x7fffffffffffffff);
+ li(scratch1, 0x7FFFFFFFFFFFFFFF);
and_(t8, t8, scratch1);
}
dmtc1(t8, fs);
@@ -2297,7 +2297,7 @@ void TurboAssembler::Trunc_ul_d(FPURegister fd, Register rs,
}
// Load 2^63 into scratch as its double representation.
- li(at, 0x43e0000000000000);
+ li(at, 0x43E0000000000000);
dmtc1(at, scratch);
// Test if scratch > fd.
@@ -2351,7 +2351,7 @@ void TurboAssembler::Trunc_ul_s(FPURegister fd, Register rs,
// Load 2^63 into scratch as its float representation.
UseScratchRegisterScope temps(this);
Register scratch1 = temps.Acquire();
- li(scratch1, 0x5f000000);
+ li(scratch1, 0x5F000000);
mtc1(scratch1, scratch);
}
@@ -4037,9 +4037,11 @@ void MacroAssembler::MaybeDropFrames() {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ Push(Smi::kZero); // Padding.
+
// Link the current handler as the next handler.
li(a6,
Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
@@ -4174,7 +4176,8 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
}
if (FLAG_debug_code) {
- Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
+ Check(lo, AbortReason::kStackAccessBelowStackPointer, src_reg,
+ Operand(dst_reg));
}
// Restore caller's frame pointer and return address now as they will be
@@ -4747,13 +4750,13 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
// -----------------------------------------------------------------------------
// Debugging.
-void TurboAssembler::Assert(Condition cc, BailoutReason reason, Register rs,
+void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
if (emit_debug_code())
Check(cc, reason, rs, rt);
}
-void TurboAssembler::Check(Condition cc, BailoutReason reason, Register rs,
+void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
Operand rt) {
Label L;
Branch(&L, cc, rs, rt);
@@ -4762,11 +4765,11 @@ void TurboAssembler::Check(Condition cc, BailoutReason reason, Register rs,
bind(&L);
}
-void TurboAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
+ const char* msg = GetAbortReason(reason);
if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -5095,7 +5098,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
andi(scratch, object, kSmiTagMask);
- Check(ne, kOperandIsASmi, scratch, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
}
}
@@ -5106,7 +5109,7 @@ void MacroAssembler::AssertSmi(Register object) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
andi(scratch, object, kSmiTagMask);
- Check(eq, kOperandIsASmi, scratch, Operand(zero_reg));
+ Check(eq, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
}
}
@@ -5114,9 +5117,11 @@ void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
- Check(ne, kOperandIsASmiAndNotAFixedArray, t8, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray, t8,
+ Operand(zero_reg));
GetObjectType(object, t8, t8);
- Check(eq, kOperandIsNotAFixedArray, t8, Operand(FIXED_ARRAY_TYPE));
+ Check(eq, AbortReason::kOperandIsNotAFixedArray, t8,
+ Operand(FIXED_ARRAY_TYPE));
}
}
@@ -5124,9 +5129,11 @@ void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
- Check(ne, kOperandIsASmiAndNotAFunction, t8, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
+ Operand(zero_reg));
GetObjectType(object, t8, t8);
- Check(eq, kOperandIsNotAFunction, t8, Operand(JS_FUNCTION_TYPE));
+ Check(eq, AbortReason::kOperandIsNotAFunction, t8,
+ Operand(JS_FUNCTION_TYPE));
}
}
@@ -5135,9 +5142,11 @@ void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
- Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, t8,
+ Operand(zero_reg));
GetObjectType(object, t8, t8);
- Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
+ Check(eq, AbortReason::kOperandIsNotABoundFunction, t8,
+ Operand(JS_BOUND_FUNCTION_TYPE));
}
}
@@ -5145,7 +5154,8 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
- Check(ne, kOperandIsASmiAndNotAGeneratorObject, t8, Operand(zero_reg));
+ Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8,
+ Operand(zero_reg));
GetObjectType(object, t8, t8);
@@ -5157,7 +5167,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
// Check if JSAsyncGeneratorObject
Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
- Abort(kOperandIsNotAGeneratorObject);
+ Abort(AbortReason::kOperandIsNotAGeneratorObject);
bind(&done);
}
@@ -5171,7 +5181,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Branch(&done_checking, eq, object, Operand(scratch));
Ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
- Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch));
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell, t8, Operand(scratch));
bind(&done_checking);
}
}
@@ -5402,8 +5412,8 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
- li(t8, Operand(function));
- CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
+ li(t9, Operand(function));
+ CallCFunctionHelper(t9, num_reg_arguments, num_double_arguments);
}
void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index a29c79635c..f89682d34c 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -195,13 +195,13 @@ class TurboAssembler : public Assembler {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
+ void Assert(Condition cc, AbortReason reason, Register rs, Operand rt);
// Like Assert(), but always enabled.
- void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);
+ void Check(Condition cc, AbortReason reason, Register rs, Operand rt);
// Print a message to stdout and abort execution.
- void Abort(BailoutReason msg);
+ void Abort(AbortReason msg);
inline bool AllowThisStubCall(CodeStub* stub);
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index e992efebf5..ebb8a76ad7 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -41,14 +41,14 @@ static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
uint64_t u0, v0, w0;
int64_t u1, v1, w1, w2, t;
- u0 = u & 0xffffffffL;
+ u0 = u & 0xFFFFFFFFL;
u1 = u >> 32;
- v0 = v & 0xffffffffL;
+ v0 = v & 0xFFFFFFFFL;
v1 = v >> 32;
w0 = u0 * v0;
t = u1 * v0 + (w0 >> 32);
- w1 = t & 0xffffffffL;
+ w1 = t & 0xFFFFFFFFL;
w2 = t >> 32;
w1 = u0 * v1 + w1;
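
MultiplyHighSigned computes the upper 64 bits of a signed 128-bit product from 32-bit halves (u = u1*2^32 + u0, v = v1*2^32 + v0), summing the partial products column by column. A standalone sketch of the same decomposition follows; the function's tail is outside this hunk, so the return expression here is the standard Hacker's Delight completion rather than a quote from the patch, and the __int128 cross-check assumes a compiler that provides that extension.

#include <cassert>
#include <cstdint>

int64_t MulHighSigned(int64_t u, int64_t v) {
  uint64_t u0 = u & 0xFFFFFFFFL, v0 = v & 0xFFFFFFFFL;
  int64_t u1 = u >> 32, v1 = v >> 32;
  uint64_t w0 = u0 * v0;                     // low x low
  int64_t t = u1 * v0 + (w0 >> 32);          // high x low plus carry out of w0
  int64_t w1 = (t & 0xFFFFFFFFL) + u0 * v1;  // low x high plus middle column
  int64_t w2 = t >> 32;
  return u1 * v1 + w2 + (w1 >> 32);          // high x high plus carries
}

int main() {
  for (int64_t u : {int64_t{-1}, int64_t{1} << 40, int64_t{-123456789012345}}) {
    for (int64_t v : {int64_t{7}, int64_t{-3}, int64_t{1} << 35}) {
      assert(MulHighSigned(u, v) ==
             static_cast<int64_t>((static_cast<__int128>(u) * v) >> 64));
    }
  }
  return 0;
}
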
@@ -75,8 +75,8 @@ class MipsDebugger {
void PrintAllRegsIncludingFPU();
private:
- // We set the breakpoint code to 0xfffff to easily recognize it.
- static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xfffff << 6;
+ // We set the breakpoint code to 0xFFFFF to easily recognize it.
+ static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xFFFFF << 6;
static const Instr kNopInstr = 0x0;
Simulator* sim_;
@@ -401,7 +401,7 @@ void MipsDebugger::Debug() {
if (fpuregnum != kInvalidFPURegister) {
value = GetFPURegisterValue(fpuregnum);
- value &= 0xffffffffUL;
+ value &= 0xFFFFFFFFUL;
fvalue = GetFPURegisterValueFloat(fpuregnum);
PrintF("%s: 0x%08" PRIx64 " %11.4e\n", arg1, value, fvalue);
} else {
@@ -740,6 +740,10 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
+void Simulator::SetRedirectInstruction(Instruction* instruction) {
+ instruction->SetInstructionBits(rtCallRedirInstr);
+}
+
void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
void* start_addr, size_t size) {
int64_t start = reinterpret_cast<int64_t>(start_addr);
@@ -809,21 +813,12 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
}
-void Simulator::Initialize(Isolate* isolate) {
- if (isolate->simulator_initialized()) return;
- isolate->set_simulator_initialized(true);
- ::v8::internal::ExternalReference::set_redirector(isolate,
- &RedirectExternalReference);
-}
-
-
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == nullptr) {
i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
- Initialize(isolate);
// Set up simulator support first. Some of this information is needed to
// set up the architecture state.
stack_size_ = FLAG_sim_stack_size * KB;
@@ -867,101 +862,6 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
Simulator::~Simulator() { free(stack_); }
-// When the generated code calls an external reference we need to catch that in
-// the simulator. The external reference will be a function compiled for the
-// host architecture. We need to call that function instead of trying to
-// execute it with the simulator. We do that by redirecting the external
-// reference to a swi (software-interrupt) instruction that is handled by
-// the simulator. We write the original destination of the jump just at a known
-// offset from the swi instruction so the simulator knows what to call.
-class Redirection {
- public:
- Redirection(Isolate* isolate, void* external_function,
- ExternalReference::Type type)
- : external_function_(external_function),
- swi_instruction_(rtCallRedirInstr),
- type_(type),
- next_(nullptr) {
- next_ = isolate->simulator_redirection();
- Simulator::current(isolate)->
- FlushICache(isolate->simulator_i_cache(),
- reinterpret_cast<void*>(&swi_instruction_),
- Instruction::kInstrSize);
- isolate->set_simulator_redirection(this);
- }
-
- void* address_of_swi_instruction() {
- return reinterpret_cast<void*>(&swi_instruction_);
- }
-
- void* external_function() { return external_function_; }
- ExternalReference::Type type() { return type_; }
-
- static Redirection* Get(Isolate* isolate, void* external_function,
- ExternalReference::Type type) {
- Redirection* current = isolate->simulator_redirection();
- for (; current != nullptr; current = current->next_) {
- if (current->external_function_ == external_function &&
- current->type_ == type) {
- return current;
- }
- }
- return new Redirection(isolate, external_function, type);
- }
-
- static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
- char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
- char* addr_of_redirection =
- addr_of_swi - offsetof(Redirection, swi_instruction_);
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- static void* ReverseRedirection(int64_t reg) {
- Redirection* redirection = FromSwiInstruction(
- reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
- return redirection->external_function();
- }
-
- static void DeleteChain(Redirection* redirection) {
- while (redirection != nullptr) {
- Redirection* next = redirection->next_;
- delete redirection;
- redirection = next;
- }
- }
-
- private:
- void* external_function_;
- uint32_t swi_instruction_;
- ExternalReference::Type type_;
- Redirection* next_;
-};
-
-
-// static
-void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
- Redirection* first) {
- Redirection::DeleteChain(first);
- if (i_cache != nullptr) {
- for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
- entry = i_cache->Next(entry)) {
- delete static_cast<CachePage*>(entry->value);
- }
- delete i_cache;
- }
-}
-
-
-void* Simulator::RedirectExternalReference(Isolate* isolate,
- void* external_function,
- ExternalReference::Type type) {
- base::LockGuard<base::Mutex> lock_guard(
- isolate->simulator_redirection_mutex());
- Redirection* redirection = Redirection::Get(isolate, external_function, type);
- return redirection->address_of_swi_instruction();
-}
-
-
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
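
The Redirection machinery deleted above is not dropped; in this refactor it moves behind the shared SimulatorBase (hence the new SetRedirectInstruction hook earlier in this file and the simulator-base.h include added to the header below). A simplified standalone model of the idea the deleted comment describes, with purely illustrative names:

// Simplified model (not V8's API): each external C function gets a small
// trampoline record whose first word is a trap instruction.  Generated code
// "calls" the trampoline, the simulator recognises the trap, recovers the
// record from the trap's address, and calls the real host function instead.
#include <cstddef>
#include <cstdint>
#include <unordered_map>

using HostFn = int64_t (*)(int64_t, int64_t);

struct Trampoline {
  uint32_t trap_instruction;  // what generated code actually jumps to
  HostFn host_function;       // what the simulator calls instead
};

class TinySim {
 public:
  void* Redirect(HostFn fn) {
    Trampoline*& t = table_[fn];
    if (t == nullptr) t = new Trampoline{kTrap, fn};
    return &t->trap_instruction;  // address handed to generated code
  }
  int64_t OnTrap(void* trap_pc, int64_t a, int64_t b) {
    // Recover the Trampoline from the trap address (offsetof-style, as the
    // deleted FromSwiInstruction did) and invoke the host function directly.
    auto* t = reinterpret_cast<Trampoline*>(
        static_cast<char*>(trap_pc) - offsetof(Trampoline, trap_instruction));
    return t->host_function(a, b);
  }

 private:
  static constexpr uint32_t kTrap = 0x0000000D;  // placeholder break-like encoding
  std::unordered_map<HostFn, Trampoline*> table_;
};
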
@@ -1077,19 +977,19 @@ int64_t Simulator::get_fpu_register(int fpureg) const {
int32_t Simulator::get_fpu_register_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xffffffff);
+ return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xFFFFFFFF);
}
int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xffffffff);
+ return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xFFFFFFFF);
}
int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return static_cast<int32_t>((FPUregisters_[fpureg * 2] >> 32) & 0xffffffff);
+ return static_cast<int32_t>((FPUregisters_[fpureg * 2] >> 32) & 0xFFFFFFFF);
}
@@ -1686,7 +1586,7 @@ int64_t Simulator::get_pc() const {
// TODO(plind): refactor this messy debug code when we do unaligned access.
void Simulator::DieOrDebug() {
- if (1) { // Flag for this was removed.
+ if ((1)) { // Flag for this was removed.
MipsDebugger dbg(this);
dbg.Debug();
} else {
@@ -2157,7 +2057,7 @@ void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) {
uint32_t Simulator::ReadBU(int64_t addr) {
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr));
- return *ptr & 0xff;
+ return *ptr & 0xFF;
}
@@ -2272,7 +2172,7 @@ void Simulator::SoftwareInterrupt() {
uint32_t code = (func == BREAK) ? instr_.Bits(25, 6) : -1;
// We first check if we met a call_rt_redirected.
if (instr_.InstructionBits() == rtCallRedirInstr) {
- Redirection* redirection = Redirection::FromSwiInstruction(instr_.instr());
+ Redirection* redirection = Redirection::FromInstruction(instr_.instr());
int64_t* stack_pointer = reinterpret_cast<int64_t*>(get_register(sp));
@@ -2546,7 +2446,7 @@ void Simulator::DisableStop(uint64_t code) {
void Simulator::IncreaseStopCounter(uint64_t code) {
DCHECK_LE(code, kMaxStopCode);
- if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
PrintF("Stop counter for code %" PRId64
" has overflowed.\n"
"Enabling this code and reseting the counter to 0.\n",
@@ -2865,8 +2765,8 @@ void Simulator::DecodeTypeRegisterSRsType() {
// Extracting sign, exponent and mantissa from the input float
uint32_t sign = (classed >> 31) & 1;
- uint32_t exponent = (classed >> 23) & 0x000000ff;
- uint32_t mantissa = classed & 0x007fffff;
+ uint32_t exponent = (classed >> 23) & 0x000000FF;
+ uint32_t mantissa = classed & 0x007FFFFF;
uint32_t result;
float fResult;
@@ -2887,7 +2787,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
// Setting flags if float is NaN
signalingNan = false;
quietNan = false;
- if (!negInf && !posInf && (exponent == 0xff)) {
+ if (!negInf && !posInf && (exponent == 0xFF)) {
quietNan = ((mantissa & 0x00200000) == 0) &&
((mantissa & (0x00200000 - 1)) == 0);
signalingNan = !quietNan;
@@ -3396,8 +3296,8 @@ void Simulator::DecodeTypeRegisterDRsType() {
// Extracting sign, exponent and mantissa from the input double
uint32_t sign = (classed >> 63) & 1;
- uint32_t exponent = (classed >> 52) & 0x00000000000007ff;
- uint64_t mantissa = classed & 0x000fffffffffffff;
+ uint32_t exponent = (classed >> 52) & 0x00000000000007FF;
+ uint64_t mantissa = classed & 0x000FFFFFFFFFFFFF;
uint64_t result;
double dResult;
@@ -3418,7 +3318,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
// Setting flags if double is NaN
signalingNan = false;
quietNan = false;
- if (!negInf && !posInf && exponent == 0x7ff) {
+ if (!negInf && !posInf && exponent == 0x7FF) {
quietNan = ((mantissa & 0x0008000000000000) != 0) &&
((mantissa & (0x0008000000000000 - 1)) == 0);
signalingNan = !quietNan;
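
Both hunks decompose the operand into sign, exponent and mantissa fields before classifying it. A short sketch of the assumed IEEE-754 layout and the basic NaN test; the finer quiet/signaling split is the architecture-specific convention encoded in the code above.

// binary32: bit 31 sign | bits 30..23 exponent (mask 0xFF) | bits 22..0 mantissa (0x007FFFFF)
// binary64: bit 63 sign | bits 62..52 exponent (mask 0x7FF) | bits 51..0 mantissa (0x000FFFFFFFFFFFFF)
// An all-ones exponent with a zero mantissa is +/-Infinity; non-zero mantissa is a NaN.
#include <cstdint>
#include <cstring>

bool IsSingleNaN(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  uint32_t exponent = (bits >> 23) & 0xFF;
  uint32_t mantissa = bits & 0x007FFFFF;
  return exponent == 0xFF && mantissa != 0;
}
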
@@ -3951,12 +3851,12 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
int32_t rt_lo = static_cast<int32_t>(rt());
i64hilo = static_cast<int64_t>(rs_lo) * static_cast<int64_t>(rt_lo);
if (kArchVariant != kMips64r6) {
- set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
+ set_register(LO, static_cast<int32_t>(i64hilo & 0xFFFFFFFF));
set_register(HI, static_cast<int32_t>(i64hilo >> 32));
} else {
switch (sa()) {
case MUL_OP:
- SetResult(rd_reg(), static_cast<int32_t>(i64hilo & 0xffffffff));
+ SetResult(rd_reg(), static_cast<int32_t>(i64hilo & 0xFFFFFFFF));
break;
case MUH_OP:
SetResult(rd_reg(), static_cast<int32_t>(i64hilo >> 32));
@@ -3969,15 +3869,15 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
break;
}
case MULTU:
- u64hilo = static_cast<uint64_t>(rs_u() & 0xffffffff) *
- static_cast<uint64_t>(rt_u() & 0xffffffff);
+ u64hilo = static_cast<uint64_t>(rs_u() & 0xFFFFFFFF) *
+ static_cast<uint64_t>(rt_u() & 0xFFFFFFFF);
if (kArchVariant != kMips64r6) {
- set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+ set_register(LO, static_cast<int32_t>(u64hilo & 0xFFFFFFFF));
set_register(HI, static_cast<int32_t>(u64hilo >> 32));
} else {
switch (sa()) {
case MUL_OP:
- SetResult(rd_reg(), static_cast<int32_t>(u64hilo & 0xffffffff));
+ SetResult(rd_reg(), static_cast<int32_t>(u64hilo & 0xFFFFFFFF));
break;
case MUH_OP:
SetResult(rd_reg(), static_cast<int32_t>(u64hilo >> 32));
@@ -4370,7 +4270,7 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
// Reverse the bit in byte for each individual byte
for (int i = 0; i < 4; i++) {
output = output >> 8;
- i_byte = input & 0xff;
+ i_byte = input & 0xFF;
// Fast way to reverse bits in byte
// Devised by Sean Anderson, July 13, 2001
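
The constants of the byte-reversal step sit outside this hunk. One well-known Sean Anderson variant from the Bit Twiddling Hacks collection is sketched below to illustrate what the per-byte step does; it is not claimed to be the exact expression in the surrounding code.

// Sketch: reverse the bits of one byte with three multiplies and masks.
#include <cassert>
#include <cstdint>

uint8_t ReverseByte(uint8_t b) {
  return static_cast<uint8_t>(
      ((b * 0x0802LU & 0x22110LU) | (b * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16);
}

int main() {
  assert(ReverseByte(0x01) == 0x80);
  assert(ReverseByte(0xF0) == 0x0F);
  assert(ReverseByte(0xB5) == 0xAD);  // 1011'0101 -> 1010'1101
  return 0;
}
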
@@ -4475,7 +4375,7 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
// Reverse the bit in byte for each individual byte
for (int i = 0; i < 8; i++) {
output = output >> 8;
- i_byte = input & 0xff;
+ i_byte = input & 0xFF;
// Fast way to reverse bits in byte
// Devised by Sean Anderson, July 13, 2001
@@ -5482,8 +5382,8 @@ void Msa3RInstrHelper_shuffle(const uint32_t opcode, T_reg ws, T_reg wt,
wd_p[2 * i + 1] = ws_p[2 * i + 1];
break;
case VSHF: {
- const int mask_not_valid = 0xc0;
- const int mask_6_bits = 0x3f;
+ const int mask_not_valid = 0xC0;
+ const int mask_6_bits = 0x3F;
if ((wd_p[i] & mask_not_valid)) {
wd_p[i] = 0;
} else {
@@ -5882,7 +5782,7 @@ void Simulator::DecodeTypeMsa3RF() {
break; \
} \
/* Infinity */ \
- dst = PACK_FLOAT16(aSign, 0x1f, 0); \
+ dst = PACK_FLOAT16(aSign, 0x1F, 0); \
break; \
} else if (aExp == 0 && aFrac == 0) { \
dst = PACK_FLOAT16(aSign, 0, 0); \
@@ -5896,13 +5796,13 @@ void Simulator::DecodeTypeMsa3RF() {
aExp -= 0x71; \
if (aExp < 1) { \
/* Will be denormal in halfprec */ \
- mask = 0x00ffffff; \
+ mask = 0x00FFFFFF; \
if (aExp >= -11) { \
mask >>= 11 + aExp; \
} \
} else { \
/* Normal number in halfprec */ \
- mask = 0x00001fff; \
+ mask = 0x00001FFF; \
} \
switch (MSACSR_ & 3) { \
case kRoundToNearest: \
@@ -5923,7 +5823,7 @@ void Simulator::DecodeTypeMsa3RF() {
} \
rounding_bumps_exp = (aFrac + increment >= 0x01000000); \
if (aExp > maxexp || (aExp == maxexp && rounding_bumps_exp)) { \
- dst = PACK_FLOAT16(aSign, 0x1f, 0); \
+ dst = PACK_FLOAT16(aSign, 0x1F, 0); \
break; \
} \
aFrac += increment; \
@@ -6444,8 +6344,8 @@ template <typename T_int, typename T_fp, typename T_reg>
T_int Msa2RFInstrHelper2(uint32_t opcode, T_reg ws, int i) {
switch (opcode) {
#define EXTRACT_FLOAT16_SIGN(fp16) (fp16 >> 15)
-#define EXTRACT_FLOAT16_EXP(fp16) (fp16 >> 10 & 0x1f)
-#define EXTRACT_FLOAT16_FRAC(fp16) (fp16 & 0x3ff)
+#define EXTRACT_FLOAT16_EXP(fp16) (fp16 >> 10 & 0x1F)
+#define EXTRACT_FLOAT16_FRAC(fp16) (fp16 & 0x3FF)
#define PACK_FLOAT32(sign, exp, frac) \
static_cast<uint32_t>(((sign) << 31) + ((exp) << 23) + (frac))
#define FEXUP_DF(src_index) \
@@ -6455,9 +6355,9 @@ T_int Msa2RFInstrHelper2(uint32_t opcode, T_reg ws, int i) {
aSign = EXTRACT_FLOAT16_SIGN(element); \
aExp = EXTRACT_FLOAT16_EXP(element); \
aFrac = EXTRACT_FLOAT16_FRAC(element); \
- if (V8_LIKELY(aExp && aExp != 0x1f)) { \
+ if (V8_LIKELY(aExp && aExp != 0x1F)) { \
return PACK_FLOAT32(aSign, aExp + 0x70, aFrac << 13); \
- } else if (aExp == 0x1f) { \
+ } else if (aExp == 0x1F) { \
if (aFrac) { \
return bit_cast<int32_t>(std::numeric_limits<float>::quiet_NaN()); \
} else { \
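
The FEXUP_DF macro above widens IEEE-754 binary16 to binary32 for normal inputs by re-biasing the exponent (127 - 15 = 0x70) and shifting the 10-bit fraction up by 13 bits. A minimal sketch of just that normal-number path, as a standalone illustration:

// binary16 is [15] sign | [14:10] exponent (bias 15) | [9:0] fraction.
#include <cassert>
#include <cstdint>
#include <cstring>

float Half16ToFloat32_NormalOnly(uint16_t h) {
  uint32_t sign = h >> 15;
  uint32_t exp = (h >> 10) & 0x1F;
  uint32_t frac = h & 0x3FF;
  assert(exp != 0 && exp != 0x1F);  // zero/denormal and inf/NaN handled separately
  uint32_t bits = (sign << 31) | ((exp + 0x70) << 23) | (frac << 13);
  float out;
  std::memcpy(&out, &bits, sizeof(out));
  return out;
}
// Example: 0x3C00 is +1.0 in binary16 and converts to 1.0f; 0xC000 converts to -2.0f.
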
@@ -6624,10 +6524,10 @@ void Simulator::DecodeTypeImmediate() {
int32_t ft_reg = instr_.FtValue(); // Destination register.
// Zero extended immediate.
- uint64_t oe_imm16 = 0xffff & imm16;
+ uint64_t oe_imm16 = 0xFFFF & imm16;
// Sign extended immediate.
int64_t se_imm16 = imm16;
- int64_t se_imm18 = imm18 | ((imm18 & 0x20000) ? 0xfffffffffffc0000 : 0);
+ int64_t se_imm18 = imm18 | ((imm18 & 0x20000) ? 0xFFFFFFFFFFFC0000 : 0);
// Next pc.
int64_t next_pc = bad_ra;
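
The se_imm18 line is plain mask-based sign extension: if bit 17 (0x20000) of the 18-bit field is set, all higher bits are filled with ones. A tiny generic helper and a worked value, for illustration only:

#include <cassert>
#include <cstdint>

int64_t SignExtend(uint64_t value, int bits) {
  uint64_t sign_bit = uint64_t{1} << (bits - 1);
  return static_cast<int64_t>((value ^ sign_bit) - sign_bit);
}

int main() {
  // The 18-bit field 0x3FFFC is -4 when interpreted as signed.
  assert(SignExtend(0x3FFFC, 18) == -4);
  // Matches the mask form used above: 0x3FFFC | 0xFFFFFFFFFFFC0000 == -4.
  assert(static_cast<int64_t>(0x3FFFC | 0xFFFFFFFFFFFC0000ULL) == -4);
  return 0;
}
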
@@ -6678,11 +6578,11 @@ void Simulator::DecodeTypeImmediate() {
const int32_t bitsIn16Int = sizeof(int16_t) * kBitsPerByte;
if (do_branch) {
if (FLAG_debug_code) {
- int16_t bits = imm16 & 0xfc;
+ int16_t bits = imm16 & 0xFC;
if (imm16 >= 0) {
CHECK_EQ(bits, 0);
} else {
- CHECK_EQ(bits ^ 0xfc, 0);
+ CHECK_EQ(bits ^ 0xFC, 0);
}
}
// jump range :[pc + kInstrSize - 512 * kInstrSize,
@@ -6973,7 +6873,6 @@ void Simulator::DecodeTypeImmediate() {
break;
// ------------- Arithmetic instructions.
case ADDIU: {
- DCHECK(is_int32(rs));
int32_t alu32_out = static_cast<int32_t>(rs + se_imm16);
// Sign-extend result of 32bit operation into 64bit register.
SetResult(rt_reg, static_cast<int64_t>(alu32_out));
@@ -7122,7 +7021,7 @@ void Simulator::DecodeTypeImmediate() {
uint64_t mask = byte_shift ? (~0UL << (al_offset + 1) * 8) : 0;
addr = rs + se_imm16 - al_offset;
uint64_t mem_value = Read2W(addr, instr_.instr()) & mask;
- mem_value |= rt >> byte_shift * 8;
+ mem_value |= static_cast<uint64_t>(rt) >> byte_shift * 8;
Write2W(addr, mem_value, instr_.instr());
break;
}
@@ -7227,7 +7126,7 @@ void Simulator::DecodeTypeImmediate() {
}
case ADDIUPC: {
int64_t se_imm19 =
- imm19 | ((imm19 & 0x40000) ? 0xfffffffffff80000 : 0);
+ imm19 | ((imm19 & 0x40000) ? 0xFFFFFFFFFFF80000 : 0);
alu_out = current_pc + (se_imm19 << 2);
break;
}
@@ -7337,7 +7236,7 @@ void Simulator::DecodeTypeJump() {
// Get current pc.
int64_t current_pc = get_pc();
// Get unchanged bits of pc.
- int64_t pc_high_bits = current_pc & 0xfffffffff0000000;
+ int64_t pc_high_bits = current_pc & 0xFFFFFFFFF0000000;
// Next pc.
int64_t next_pc = pc_high_bits | (simInstr.Imm26Value() << 2);
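
J-type jumps keep the upper bits of the current PC (everything above bit 27) and replace the low 28 bits with the 26-bit immediate shifted left by two, so the target stays within the same 256 MB region. A worked example with made-up numbers:

#include <cassert>
#include <cstdint>

int64_t JumpTarget(int64_t current_pc, uint32_t imm26) {
  int64_t pc_high_bits = current_pc & 0xFFFFFFFFF0000000;    // keep bits 63..28
  return pc_high_bits | (static_cast<int64_t>(imm26) << 2);  // low 28 bits from imm26
}

int main() {
  // pc = 0x12345678, imm26 = 0x0123456 -> high bits 0x10000000 | 0x048D158
  assert(JumpTarget(0x0000000012345678, 0x0123456) == 0x000000001048D158);
  return 0;
}
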
@@ -7504,33 +7403,30 @@ void Simulator::CallInternal(byte* entry) {
set_register(fp, fp_val);
}
-
-int64_t Simulator::Call(byte* entry, int argument_count, ...) {
- const int kRegisterPassedArguments = 8;
- va_list parameters;
- va_start(parameters, argument_count);
+intptr_t Simulator::CallImpl(byte* entry, int argument_count,
+ const intptr_t* arguments) {
+ constexpr int kRegisterPassedArguments = 8;
// Set up arguments.
// First four arguments passed in registers in both ABIs.
- DCHECK_GE(argument_count, 4);
- set_register(a0, va_arg(parameters, int64_t));
- set_register(a1, va_arg(parameters, int64_t));
- set_register(a2, va_arg(parameters, int64_t));
- set_register(a3, va_arg(parameters, int64_t));
+ int reg_arg_count = std::min(kRegisterPassedArguments, argument_count);
+ if (reg_arg_count > 0) set_register(a0, arguments[0]);
+ if (reg_arg_count > 1) set_register(a1, arguments[1]);
+ if (reg_arg_count > 2) set_register(a2, arguments[2]);
+ if (reg_arg_count > 3) set_register(a3, arguments[3]);
// Up to eight arguments passed in registers in N64 ABI.
// TODO(plind): N64 ABI calls these regs a4 - a7. Clarify this.
- if (argument_count >= 5) set_register(a4, va_arg(parameters, int64_t));
- if (argument_count >= 6) set_register(a5, va_arg(parameters, int64_t));
- if (argument_count >= 7) set_register(a6, va_arg(parameters, int64_t));
- if (argument_count >= 8) set_register(a7, va_arg(parameters, int64_t));
+ if (reg_arg_count > 4) set_register(a4, arguments[4]);
+ if (reg_arg_count > 5) set_register(a5, arguments[5]);
+ if (reg_arg_count > 6) set_register(a6, arguments[6]);
+ if (reg_arg_count > 7) set_register(a7, arguments[7]);
// Remaining arguments passed on stack.
int64_t original_stack = get_register(sp);
// Compute position of stack on entry to generated code.
- int stack_args_count = (argument_count > kRegisterPassedArguments) ?
- (argument_count - kRegisterPassedArguments) : 0;
- int stack_args_size = stack_args_count * sizeof(int64_t) + kCArgsSlotsSize;
+ int stack_args_count = argument_count - reg_arg_count;
+ int stack_args_size = stack_args_count * sizeof(*arguments) + kCArgsSlotsSize;
int64_t entry_stack = original_stack - stack_args_size;
if (base::OS::ActivationFrameAlignment() != 0) {
@@ -7538,11 +7434,8 @@ int64_t Simulator::Call(byte* entry, int argument_count, ...) {
}
// Store remaining arguments on stack, from low to high memory.
intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = kRegisterPassedArguments; i < argument_count; i++) {
- int stack_index = i - kRegisterPassedArguments + kCArgSlotCount;
- stack_argument[stack_index] = va_arg(parameters, int64_t);
- }
- va_end(parameters);
+ memcpy(stack_argument + kCArgSlotCount, arguments + reg_arg_count,
+ stack_args_count * sizeof(*arguments));
set_register(sp, entry_stack);
CallInternal(entry);
@@ -7551,8 +7444,7 @@ int64_t Simulator::Call(byte* entry, int argument_count, ...) {
CHECK_EQ(entry_stack, get_register(sp));
set_register(sp, original_stack);
- int64_t result = get_register(v0);
- return result;
+ return get_register(v0);
}
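
The rewritten CallImpl takes a flat argument array: up to eight values go into a0..a7 and the remainder is copied onto the simulated stack after the ABI argument slots. A small worked example of the stack sizing follows; kCArgsSlotsSize and the activation-frame alignment are platform constants not visible in this hunk, so the values used here (0 and 16) are assumptions, as is the align-down step performed by the elided alignment branch.

#include <algorithm>
#include <cassert>
#include <cstdint>

int main() {
  const int kRegisterPassedArguments = 8;
  const int kCArgsSlotsSize = 0;       // assumption (MIPS n64)
  const int64_t kFrameAlignment = 16;  // assumption
  int argument_count = 11;
  int reg_arg_count = std::min(kRegisterPassedArguments, argument_count);  // 8
  int stack_args_count = argument_count - reg_arg_count;                   // 3
  int stack_args_size = stack_args_count * 8 + kCArgsSlotsSize;            // 24
  int64_t original_stack = 0x7FFF0000;
  int64_t entry_stack = (original_stack - stack_args_size) & -kFrameAlignment;
  assert(stack_args_count == 3);
  assert(entry_stack == 0x7FFEFFE0);  // 0x7FFF0000 - 24 = 0x7FFEFFE8, aligned down
  return 0;
}
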
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index 4ef22cbcfe..c4292236b0 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
// Declares a Simulator for MIPS instructions if we are not generating a native
// MIPS binary. This Simulator allows us to run and debug MIPS code generation
// on regular desktop machines.
-// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// V8 calls into generated code via the GeneratedCode wrapper,
// which will start execution in the Simulator or forwards to the real entry
// on a MIPS HW platform.
@@ -16,71 +15,12 @@
#include "src/allocation.h"
#include "src/mips64/constants-mips64.h"
-#if !defined(USE_SIMULATOR)
-// Running without a simulator on a native mips platform.
-
-namespace v8 {
-namespace internal {
-
-// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- entry(p0, p1, p2, p3, p4)
-
-
-// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type arm_regexp_matcher.
-typedef int (*mips_regexp_matcher)(String* input,
- int64_t start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- int64_t output_size,
- Address stack_base,
- int64_t direct_call,
- Isolate* isolate);
-
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
- p8))
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on mips uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
- uintptr_t c_limit) {
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
- uintptr_t try_catch_address) {
- USE(isolate);
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch(Isolate* isolate) { USE(isolate); }
-};
-
-} // namespace internal
-} // namespace v8
-
-// Calculated the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-// NOTE: The check for overflow is not safe as there is no guarantee that the
-// running thread has its stack in all memory up to address 0x00000000.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
- (reinterpret_cast<uintptr_t>(this) >= limit ? \
- reinterpret_cast<uintptr_t>(this) - limit : 0)
-
-#else // !defined(USE_SIMULATOR)
+#if defined(USE_SIMULATOR)
// Running with a simulator.
#include "src/assembler.h"
#include "src/base/hashmap.h"
+#include "src/simulator-base.h"
namespace v8 {
namespace internal {
@@ -151,7 +91,7 @@ class SimInstruction : public InstructionGetters<SimInstructionBase> {
}
};
-class Simulator {
+class Simulator : public SimulatorBase {
public:
friend class MipsDebugger;
@@ -231,7 +171,7 @@ class Simulator {
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
- static Simulator* current(v8::internal::Isolate* isolate);
+ V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);
// Accessors for register state. Reading the pc value adheres to the MIPS
// architecture specification and is off by 8 from the currently executing
@@ -298,15 +238,11 @@ class Simulator {
// Executes MIPS instructions until the PC reaches end_sim_pc.
void Execute();
- // Call on program start.
- static void Initialize(Isolate* isolate);
-
- static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
+ template <typename Return, typename... Args>
+ Return Call(byte* entry, Args... args) {
+ return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
+ }
- // V8 generally calls into generated JS code with 5 parameters and into
- // generated RegExp code with 7 parameters. This is a convenience function,
- // which sets up the simulator state and grabs the result on return.
- int64_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
double CallFP(byte* entry, double d0, double d1);
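
Callers of the deleted varargs Call go through the typed template instead. A hedged usage sketch, purely illustrative since the real call sites sit behind the GeneratedCode wrapper mentioned in the header comment:

// Sketch only:
//   Simulator* sim = Simulator::current(isolate);
//   int64_t result = sim->Call<int64_t>(entry_point, arg0, arg1, arg2, arg3);
// VariadicCall converts each argument to an intptr_t slot, collects the slots
// into an array, and forwards to CallImpl, which loads a0..a7 and spills the
// rest to the simulated stack as shown in the .cc hunk earlier in this diff.
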
@@ -320,6 +256,9 @@ class Simulator {
void set_last_debugger_input(char* input);
char* last_debugger_input() { return last_debugger_input_; }
+ // Redirection support.
+ static void SetRedirectInstruction(Instruction* instruction);
+
// ICache checking.
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -342,6 +281,9 @@ class Simulator {
Unpredictable = 0xbadbeaf
};
+ V8_EXPORT_PRIVATE intptr_t CallImpl(byte* entry, int argument_count,
+ const intptr_t* arguments);
+
// Unsupported instructions use Format to print an error and stop execution.
void Format(Instruction* instr, const char* format);
@@ -587,11 +529,6 @@ class Simulator {
// Exceptions.
void SignalException(Exception e);
- // Runtime call support. Uses the isolate in a thread-safe way.
- static void* RedirectExternalReference(Isolate* isolate,
- void* external_function,
- ExternalReference::Type type);
-
// Handle arguments and return value for runtime FP functions.
void GetFpArgs(double* x, double* y, int32_t* z);
void SetFpResult(const double& result);
@@ -645,45 +582,8 @@ class Simulator {
StopCountAndDesc watched_stops_[kMaxStopCode + 1];
};
-
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
- FUNCTION_ADDR(entry), 5, reinterpret_cast<int64_t*>(p0), \
- reinterpret_cast<int64_t*>(p1), reinterpret_cast<int64_t*>(p2), \
- reinterpret_cast<int64_t*>(p3), reinterpret_cast<int64_t*>(p4)))
-
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- static_cast<int>(Simulator::current(isolate)->Call( \
- entry, 9, p0, p1, p2, p3, p4, reinterpret_cast<int64_t*>(p5), p6, p7, \
- p8))
-
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. The JS-based limit normally points near the end of
-// the simulator stack. When the C-based limit is exhausted we reflect that by
-// lowering the JS-based limit as well, to make stack checks trigger.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
- uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit(c_limit);
- }
-
- static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
- uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(isolate);
- return sim->PushAddress(try_catch_address);
- }
-
- static inline void UnregisterCTryCatch(Isolate* isolate) {
- Simulator::current(isolate)->PopAddress();
- }
-};
-
} // namespace internal
} // namespace v8
-#endif // !defined(USE_SIMULATOR)
+#endif // defined(USE_SIMULATOR)
#endif // V8_MIPS_SIMULATOR_MIPS_H_
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index ec6c39e288..bd391d272b 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -564,6 +564,9 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
} else {
return Op::template apply<StructBodyDescriptor>(p1, p2, p3);
}
+ case LOAD_HANDLER_TYPE:
+ case STORE_HANDLER_TYPE:
+ return Op::template apply<StructBodyDescriptor>(p1, p2, p3);
default:
PrintF("Unknown type: %d\n", type);
UNREACHABLE();
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index f1f49d5c45..142dbf6611 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -14,6 +14,7 @@
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/objects/bigint.h"
+#include "src/objects/data-handler-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/literal-objects.h"
#include "src/objects/module.h"
@@ -44,6 +45,13 @@ void Object::VerifyPointer(Object* p) {
}
}
+namespace {
+void VerifyForeignPointer(HeapObject* host, Object* foreign) {
+ host->VerifyPointer(foreign);
+ CHECK(foreign->IsUndefined(host->GetIsolate()) ||
+ Foreign::IsNormalized(foreign));
+}
+} // namespace
void Smi::SmiVerify() {
CHECK(IsSmi());
@@ -252,6 +260,14 @@ void HeapObject::HeapObjectVerify() {
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
+ case LOAD_HANDLER_TYPE:
+ LoadHandler::cast(this)->LoadHandlerVerify();
+ break;
+
+ case STORE_HANDLER_TYPE:
+ StoreHandler::cast(this)->StoreHandlerVerify();
+ break;
+
default:
UNREACHABLE();
break;
@@ -432,6 +448,10 @@ void Map::MapVerify() {
CHECK_IMPLIES(IsJSObjectMap() && !CanHaveFastTransitionableElementsKind(),
IsDictionaryElementsKind(elements_kind()) ||
IsTerminalElementsKind(elements_kind()));
+ if (is_prototype_map()) {
+ DCHECK(prototype_info() == Smi::kZero ||
+ prototype_info()->IsPrototypeInfo());
+ }
}
@@ -475,11 +495,11 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
uint64_t value = get_representation(i);
uint64_t unexpected =
bit_cast<uint64_t>(std::numeric_limits<double>::quiet_NaN()) &
- V8_UINT64_C(0x7FF8000000000000);
+ uint64_t{0x7FF8000000000000};
// Create implementation specific sNaN by inverting relevant bit.
- unexpected ^= V8_UINT64_C(0x0008000000000000);
- CHECK((value & V8_UINT64_C(0x7FF8000000000000)) != unexpected ||
- (value & V8_UINT64_C(0x0007FFFFFFFFFFFF)) == V8_UINT64_C(0));
+ unexpected ^= uint64_t{0x0008000000000000};
+ CHECK((value & uint64_t{0x7FF8000000000000}) != unexpected ||
+ (value & uint64_t{0x0007FFFFFFFFFFFF}) == uint64_t{0});
}
}
}
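
A worked reading of the masks in the loop above (IEEE-754 binary64; the usual quiet_NaN() bit pattern is 0x7FF8000000000000):

#include <cstdint>
static_assert((uint64_t{0x7FF8000000000000} ^ uint64_t{0x0008000000000000}) ==
                  uint64_t{0x7FF0000000000000},
              "'unexpected' is the all-ones exponent with the quiet bit clear");
// value & 0x7FF8000000000000 == 0x7FF0000000000000 therefore means "exponent
// bits all ones, quiet bit (bit 51) clear".  The CHECK tolerates that only
// when the 51 payload bits (mask 0x0007FFFFFFFFFFFF) are zero, i.e. the value
// is +/-Infinity; any other such pattern is a signaling-NaN layout, which the
// verifier rejects for FixedDoubleArray contents.
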
@@ -930,7 +950,7 @@ void JSArray::JSArrayVerify() {
CHECK(HasDictionaryElements());
uint32_t array_length;
CHECK(length()->ToArrayLength(&array_length));
- if (array_length == 0xffffffff) {
+ if (array_length == 0xFFFFFFFF) {
CHECK(length()->ToArrayLength(&array_length));
}
if (array_length != 0) {
@@ -1137,8 +1157,10 @@ void JSProxy::JSProxyVerify() {
VerifyPointer(target());
VerifyPointer(handler());
Isolate* isolate = GetIsolate();
- CHECK_EQ(target()->IsCallable(), map()->is_callable());
- CHECK_EQ(target()->IsConstructor(), map()->is_constructor());
+ if (!IsRevoked()) {
+ CHECK_EQ(target()->IsCallable(), map()->is_callable());
+ CHECK_EQ(target()->IsConstructor(), map()->is_constructor());
+ }
CHECK(map()->prototype()->IsNull(isolate));
// There should be no properties on a Proxy.
CHECK_EQ(0, map()->NumberOfOwnDescriptors());
@@ -1303,7 +1325,7 @@ void PrototypeInfo::PrototypeInfoVerify() {
} else {
CHECK(prototype_users()->IsSmi());
}
- CHECK(validity_cell()->IsCell() || validity_cell()->IsSmi());
+ CHECK(validity_cell()->IsSmi() || validity_cell()->IsCell());
}
void Tuple2::Tuple2Verify() {
@@ -1325,6 +1347,33 @@ void Tuple3::Tuple3Verify() {
VerifyObjectField(kValue3Offset);
}
+void DataHandler::DataHandlerVerify() {
+ CHECK(IsDataHandler());
+ CHECK_IMPLIES(!smi_handler()->IsSmi(),
+ smi_handler()->IsCode() && IsStoreHandler());
+ CHECK(validity_cell()->IsSmi() || validity_cell()->IsCell());
+ int data_count = data_field_count();
+ if (data_count >= 1) {
+ VerifyObjectField(kData1Offset);
+ }
+ if (data_count >= 2) {
+ VerifyObjectField(kData2Offset);
+ }
+ if (data_count >= 3) {
+ VerifyObjectField(kData3Offset);
+ }
+}
+
+void LoadHandler::LoadHandlerVerify() {
+ DataHandler::DataHandlerVerify();
+ // TODO(ishell): check handler integrity
+}
+
+void StoreHandler::StoreHandlerVerify() {
+ DataHandler::DataHandlerVerify();
+ // TODO(ishell): check handler integrity
+}
+
void ContextExtension::ContextExtensionVerify() {
CHECK(IsContextExtension());
VerifyObjectField(kScopeInfoOffset);
@@ -1335,9 +1384,9 @@ void AccessorInfo::AccessorInfoVerify() {
CHECK(IsAccessorInfo());
VerifyPointer(name());
VerifyPointer(expected_receiver_type());
- VerifyPointer(getter());
- VerifyPointer(setter());
- VerifyPointer(js_getter());
+ VerifyForeignPointer(this, getter());
+ VerifyForeignPointer(this, setter());
+ VerifyForeignPointer(this, js_getter());
VerifyPointer(data());
}
@@ -1360,11 +1409,11 @@ void AccessCheckInfo::AccessCheckInfoVerify() {
void InterceptorInfo::InterceptorInfoVerify() {
CHECK(IsInterceptorInfo());
- VerifyPointer(getter());
- VerifyPointer(setter());
- VerifyPointer(query());
- VerifyPointer(deleter());
- VerifyPointer(enumerator());
+ VerifyForeignPointer(this, getter());
+ VerifyForeignPointer(this, setter());
+ VerifyForeignPointer(this, query());
+ VerifyForeignPointer(this, deleter());
+ VerifyForeignPointer(this, enumerator());
VerifyPointer(data());
VerifySmiField(kFlagsOffset);
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 1cbb299057..c3841aa63e 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -34,9 +34,12 @@
#include "src/objects.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
+#include "src/objects/data-handler-inl.h"
+#include "src/objects/fixed-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/hash-table.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/js-collection-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/literal-objects.h"
#include "src/objects/module-inl.h"
@@ -77,15 +80,12 @@ int PropertyDetails::field_width_in_words() const {
TYPE_CHECKER(BigInt, BIGINT_TYPE)
TYPE_CHECKER(BreakPoint, TUPLE2_TYPE)
TYPE_CHECKER(BreakPointInfo, TUPLE2_TYPE)
-TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
TYPE_CHECKER(CallHandlerInfo, TUPLE3_TYPE)
TYPE_CHECKER(Cell, CELL_TYPE)
TYPE_CHECKER(ConstantElementsPair, TUPLE2_TYPE)
TYPE_CHECKER(CoverageInfo, FIXED_ARRAY_TYPE)
TYPE_CHECKER(DescriptorArray, DESCRIPTOR_ARRAY_TYPE)
TYPE_CHECKER(FeedbackVector, FEEDBACK_VECTOR_TYPE)
-TYPE_CHECKER(FixedArrayExact, FIXED_ARRAY_TYPE)
-TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
TYPE_CHECKER(Foreign, FOREIGN_TYPE)
TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
TYPE_CHECKER(HashTable, HASH_TABLE_TYPE)
@@ -99,15 +99,10 @@ TYPE_CHECKER(JSDate, JS_DATE_TYPE)
TYPE_CHECKER(JSError, JS_ERROR_TYPE)
TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
-TYPE_CHECKER(JSMap, JS_MAP_TYPE)
TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
-TYPE_CHECKER(JSSet, JS_SET_TYPE)
TYPE_CHECKER(JSStringIterator, JS_STRING_ITERATOR_TYPE)
TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
-TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
-TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
-TYPE_CHECKER(Map, MAP_TYPE)
TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
TYPE_CHECKER(Oddball, ODDBALL_TYPE)
TYPE_CHECKER(PreParsedScopeData, TUPLE2_TYPE)
@@ -120,13 +115,11 @@ TYPE_CHECKER(SourcePositionTableWithFrameCache, TUPLE2_TYPE)
TYPE_CHECKER(TemplateMap, HASH_TABLE_TYPE)
TYPE_CHECKER(TemplateObjectDescription, TUPLE3_TYPE)
TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
-TYPE_CHECKER(TypeFeedbackInfo, TUPLE3_TYPE)
TYPE_CHECKER(WasmInstanceObject, WASM_INSTANCE_TYPE)
TYPE_CHECKER(WasmMemoryObject, WASM_MEMORY_TYPE)
TYPE_CHECKER(WasmModuleObject, WASM_MODULE_TYPE)
TYPE_CHECKER(WasmTableObject, WASM_TABLE_TYPE)
TYPE_CHECKER(WeakCell, WEAK_CELL_TYPE)
-TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype, size) \
TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
@@ -304,6 +297,9 @@ bool HeapObject::IsJSProxy() const { return map()->IsJSProxyMap(); }
bool HeapObject::IsJSMapIterator() const {
InstanceType instance_type = map()->instance_type();
+ STATIC_ASSERT(JS_MAP_KEY_ITERATOR_TYPE + 1 == JS_MAP_KEY_VALUE_ITERATOR_TYPE);
+ STATIC_ASSERT(JS_MAP_KEY_VALUE_ITERATOR_TYPE + 1 ==
+ JS_MAP_VALUE_ITERATOR_TYPE);
return (instance_type >= JS_MAP_KEY_ITERATOR_TYPE &&
instance_type <= JS_MAP_VALUE_ITERATOR_TYPE);
}
@@ -332,7 +328,10 @@ bool HeapObject::IsEnumCache() const { return IsTuple2(); }
bool HeapObject::IsFrameArray() const { return IsFixedArrayExact(); }
-bool HeapObject::IsArrayList() const { return IsFixedArrayExact(); }
+bool HeapObject::IsArrayList() const {
+ return map() == GetHeap()->array_list_map() ||
+ this == GetHeap()->empty_fixed_array();
+}
bool HeapObject::IsRegExpMatchInfo() const { return IsFixedArrayExact(); }
@@ -548,21 +547,15 @@ CAST_ACCESSOR(AccessorInfo)
CAST_ACCESSOR(AccessorPair)
CAST_ACCESSOR(AllocationMemento)
CAST_ACCESSOR(AllocationSite)
-CAST_ACCESSOR(ArrayList)
CAST_ACCESSOR(AsyncGeneratorRequest)
CAST_ACCESSOR(BigInt)
CAST_ACCESSOR(BoilerplateDescription)
-CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(CallHandlerInfo)
CAST_ACCESSOR(Cell)
CAST_ACCESSOR(ConstantElementsPair)
CAST_ACCESSOR(ContextExtension)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(EnumCache)
-CAST_ACCESSOR(FixedArray)
-CAST_ACCESSOR(FixedArrayBase)
-CAST_ACCESSOR(FixedDoubleArray)
-CAST_ACCESSOR(FixedTypedArrayBase)
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(FunctionTemplateInfo)
CAST_ACCESSOR(GlobalDictionary)
@@ -577,20 +570,13 @@ CAST_ACCESSOR(JSFunction)
CAST_ACCESSOR(JSGeneratorObject)
CAST_ACCESSOR(JSGlobalObject)
CAST_ACCESSOR(JSGlobalProxy)
-CAST_ACCESSOR(JSMap)
-CAST_ACCESSOR(JSMapIterator)
CAST_ACCESSOR(JSMessageObject)
CAST_ACCESSOR(JSObject)
CAST_ACCESSOR(JSPromise)
CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(JSReceiver)
-CAST_ACCESSOR(JSSet)
-CAST_ACCESSOR(JSSetIterator)
CAST_ACCESSOR(JSStringIterator)
CAST_ACCESSOR(JSValue)
-CAST_ACCESSOR(JSWeakCollection)
-CAST_ACCESSOR(JSWeakMap)
-CAST_ACCESSOR(JSWeakSet)
CAST_ACCESSOR(LayoutDescriptor)
CAST_ACCESSOR(NameDictionary)
CAST_ACCESSOR(NormalizedMapCache)
@@ -619,14 +605,11 @@ CAST_ACCESSOR(StringSet)
CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(Struct)
CAST_ACCESSOR(TemplateInfo)
-CAST_ACCESSOR(TemplateList)
CAST_ACCESSOR(TemplateMap)
CAST_ACCESSOR(TemplateObjectDescription)
CAST_ACCESSOR(Tuple2)
CAST_ACCESSOR(Tuple3)
-CAST_ACCESSOR(TypeFeedbackInfo)
CAST_ACCESSOR(WeakCell)
-CAST_ACCESSOR(WeakFixedArray)
CAST_ACCESSOR(WeakHashTable)
bool Object::HasValidElements() {
@@ -1087,29 +1070,8 @@ int HeapNumber::get_sign() {
return READ_INT_FIELD(this, kExponentOffset) & kSignMask;
}
-inline Object* OrderedHashMap::ValueAt(int entry) {
- DCHECK_LT(entry, this->UsedCapacity());
- return get(EntryToIndex(entry) + kValueOffset);
-}
-
ACCESSORS(JSReceiver, raw_properties_or_hash, Object, kPropertiesOrHashOffset)
-Object** FixedArray::GetFirstElementAddress() {
- return reinterpret_cast<Object**>(FIELD_ADDR(this, OffsetOfElementAt(0)));
-}
-
-
-bool FixedArray::ContainsOnlySmisOrHoles() {
- Object* the_hole = GetHeap()->the_hole_value();
- Object** current = GetFirstElementAddress();
- for (int i = 0; i < length(); ++i) {
- Object* candidate = *current++;
- if (!candidate->IsSmi() && candidate != the_hole) return false;
- }
- return true;
-}
-
-
FixedArrayBase* JSObject::elements() const {
Object* array = READ_FIELD(this, kElementsOffset);
return static_cast<FixedArrayBase*>(array);
@@ -1467,6 +1429,15 @@ inline bool IsSpecialReceiverInstanceType(InstanceType instance_type) {
return instance_type <= LAST_SPECIAL_RECEIVER_TYPE;
}
+// This should be in objects/map-inl.h, but can't, because of a cyclic
+// dependency.
+bool Map::IsSpecialReceiverMap() const {
+ bool result = IsSpecialReceiverInstanceType(instance_type());
+ DCHECK_IMPLIES(!result,
+ !has_named_interceptor() && !is_access_check_needed());
+ return result;
+}
+
// static
int JSObject::GetEmbedderFieldCount(const Map* map) {
int instance_size = map->instance_size();
@@ -1517,13 +1488,6 @@ bool JSObject::IsUnboxedDoubleField(FieldIndex index) {
return map()->IsUnboxedDoubleField(index);
}
-bool Map::IsUnboxedDoubleField(FieldIndex index) const {
- if (!FLAG_unbox_double_fields) return false;
- if (index.is_hidden_field() || !index.is_inobject()) return false;
- return !layout_descriptor()->IsTagged(index.property_index());
-}
-
-
// Access fast-case object properties at index. The use of these routines
// is needed to correctly distinguish between properties stored in-object and
// properties stored in the properties array.
@@ -1657,16 +1621,6 @@ void JSObject::InitializeBody(Map* map, int start_offset,
}
}
-bool Map::TooManyFastProperties(StoreFromKeyed store_mode) const {
- if (UnusedPropertyFields() != 0) return false;
- if (is_prototype_map()) return false;
- int minimum = store_mode == CERTAINLY_NOT_STORE_FROM_KEYED ? 128 : 12;
- int limit = Max(minimum, GetInObjectProperties());
- int external = NumberOfFields() - GetInObjectProperties();
- return external > limit;
-}
-
-
void Struct::InitializeBody(int object_size) {
Object* value = GetHeap()->undefined_value();
for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
@@ -1696,57 +1650,12 @@ void Object::VerifyApiCallResultType() {
#endif // DEBUG
}
-
-Object* FixedArray::get(int index) const {
- SLOW_DCHECK(index >= 0 && index < this->length());
- return RELAXED_READ_FIELD(this, kHeaderSize + index * kPointerSize);
-}
-
Object* PropertyArray::get(int index) const {
DCHECK_GE(index, 0);
DCHECK_LE(index, this->length());
return RELAXED_READ_FIELD(this, kHeaderSize + index * kPointerSize);
}
-Handle<Object> FixedArray::get(FixedArray* array, int index, Isolate* isolate) {
- return handle(array->get(index), isolate);
-}
-
-template <class T>
-MaybeHandle<T> FixedArray::GetValue(Isolate* isolate, int index) const {
- Object* obj = get(index);
- if (obj->IsUndefined(isolate)) return MaybeHandle<T>();
- return Handle<T>(T::cast(obj), isolate);
-}
-
-template <class T>
-Handle<T> FixedArray::GetValueChecked(Isolate* isolate, int index) const {
- Object* obj = get(index);
- CHECK(!obj->IsUndefined(isolate));
- return Handle<T>(T::cast(obj), isolate);
-}
-bool FixedArray::is_the_hole(Isolate* isolate, int index) {
- return get(index)->IsTheHole(isolate);
-}
-
-void FixedArray::set(int index, Smi* value) {
- DCHECK_NE(map(), GetHeap()->fixed_cow_array_map());
- DCHECK_LT(index, this->length());
- DCHECK(reinterpret_cast<Object*>(value)->IsSmi());
- int offset = kHeaderSize + index * kPointerSize;
- RELAXED_WRITE_FIELD(this, offset, value);
-}
-
-void FixedArray::set(int index, Object* value) {
- DCHECK_NE(GetHeap()->fixed_cow_array_map(), map());
- DCHECK(IsFixedArray());
- DCHECK_GE(index, 0);
- DCHECK_LT(index, this->length());
- int offset = kHeaderSize + index * kPointerSize;
- RELAXED_WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(GetHeap(), this, offset, value);
-}
-
void PropertyArray::set(int index, Object* value) {
DCHECK(IsPropertyArray());
DCHECK_GE(index, 0);
@@ -1756,154 +1665,6 @@ void PropertyArray::set(int index, Object* value) {
WRITE_BARRIER(GetHeap(), this, offset, value);
}
-double FixedDoubleArray::get_scalar(int index) {
- DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
- map() != GetHeap()->fixed_array_map());
- DCHECK(index >= 0 && index < this->length());
- DCHECK(!is_the_hole(index));
- return READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
-}
-
-
-uint64_t FixedDoubleArray::get_representation(int index) {
- DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
- map() != GetHeap()->fixed_array_map());
- DCHECK(index >= 0 && index < this->length());
- int offset = kHeaderSize + index * kDoubleSize;
- return READ_UINT64_FIELD(this, offset);
-}
-
-Handle<Object> FixedDoubleArray::get(FixedDoubleArray* array, int index,
- Isolate* isolate) {
- if (array->is_the_hole(index)) {
- return isolate->factory()->the_hole_value();
- } else {
- return isolate->factory()->NewNumber(array->get_scalar(index));
- }
-}
-
-
-void FixedDoubleArray::set(int index, double value) {
- DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
- map() != GetHeap()->fixed_array_map());
- int offset = kHeaderSize + index * kDoubleSize;
- if (std::isnan(value)) {
- WRITE_DOUBLE_FIELD(this, offset, std::numeric_limits<double>::quiet_NaN());
- } else {
- WRITE_DOUBLE_FIELD(this, offset, value);
- }
- DCHECK(!is_the_hole(index));
-}
-
-void FixedDoubleArray::set_the_hole(Isolate* isolate, int index) {
- set_the_hole(index);
-}
-
-void FixedDoubleArray::set_the_hole(int index) {
- DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
- map() != GetHeap()->fixed_array_map());
- int offset = kHeaderSize + index * kDoubleSize;
- WRITE_UINT64_FIELD(this, offset, kHoleNanInt64);
-}
-
-bool FixedDoubleArray::is_the_hole(Isolate* isolate, int index) {
- return is_the_hole(index);
-}
-
-bool FixedDoubleArray::is_the_hole(int index) {
- return get_representation(index) == kHoleNanInt64;
-}
-
-
-double* FixedDoubleArray::data_start() {
- return reinterpret_cast<double*>(FIELD_ADDR(this, kHeaderSize));
-}
-
-
-void FixedDoubleArray::FillWithHoles(int from, int to) {
- for (int i = from; i < to; i++) {
- set_the_hole(i);
- }
-}
-
-Object* WeakFixedArray::Get(int index) const {
- Object* raw = FixedArray::cast(this)->get(index + kFirstIndex);
- if (raw->IsSmi()) return raw;
- DCHECK(raw->IsWeakCell());
- return WeakCell::cast(raw)->value();
-}
-
-
-bool WeakFixedArray::IsEmptySlot(int index) const {
- DCHECK(index < Length());
- return Get(index)->IsSmi();
-}
-
-
-void WeakFixedArray::Clear(int index) {
- FixedArray::cast(this)->set(index + kFirstIndex, Smi::kZero);
-}
-
-
-int WeakFixedArray::Length() const {
- return FixedArray::cast(this)->length() - kFirstIndex;
-}
-
-
-int WeakFixedArray::last_used_index() const {
- return Smi::ToInt(FixedArray::cast(this)->get(kLastUsedIndexIndex));
-}
-
-
-void WeakFixedArray::set_last_used_index(int index) {
- FixedArray::cast(this)->set(kLastUsedIndexIndex, Smi::FromInt(index));
-}
-
-
-template <class T>
-T* WeakFixedArray::Iterator::Next() {
- if (list_ != nullptr) {
- // Assert that list did not change during iteration.
- DCHECK_EQ(last_used_index_, list_->last_used_index());
- while (index_ < list_->Length()) {
- Object* item = list_->Get(index_++);
- if (item != Empty()) return T::cast(item);
- }
- list_ = nullptr;
- }
- return nullptr;
-}
-
-int ArrayList::Length() const {
- if (FixedArray::cast(this)->length() == 0) return 0;
- return Smi::ToInt(FixedArray::cast(this)->get(kLengthIndex));
-}
-
-
-void ArrayList::SetLength(int length) {
- return FixedArray::cast(this)->set(kLengthIndex, Smi::FromInt(length));
-}
-
-Object* ArrayList::Get(int index) const {
- return FixedArray::cast(this)->get(kFirstIndex + index);
-}
-
-
-Object** ArrayList::Slot(int index) {
- return data_start() + kFirstIndex + index;
-}
-
-void ArrayList::Set(int index, Object* obj, WriteBarrierMode mode) {
- FixedArray::cast(this)->set(kFirstIndex + index, obj, mode);
-}
-
-
-void ArrayList::Clear(int index, Object* undefined) {
- DCHECK(undefined->IsUndefined(GetIsolate()));
- FixedArray::cast(this)
- ->set(kFirstIndex + index, undefined, SKIP_WRITE_BARRIER);
-}
-
int RegExpMatchInfo::NumberOfCaptureRegisters() {
DCHECK_GE(length(), kLastMatchOverhead);
Object* obj = get(kNumberOfCapturesIndex);
@@ -1987,17 +1748,6 @@ bool HeapObject::NeedsRehashing() const {
}
}
-void FixedArray::set(int index,
- Object* value,
- WriteBarrierMode mode) {
- DCHECK_NE(map(), GetHeap()->fixed_cow_array_map());
- DCHECK_GE(index, 0);
- DCHECK_LT(index, this->length());
- int offset = kHeaderSize + index * kPointerSize;
- RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
-}
-
void PropertyArray::set(int index, Object* value, WriteBarrierMode mode) {
DCHECK_GE(index, 0);
DCHECK_LT(index, this->length());
@@ -2006,57 +1756,10 @@ void PropertyArray::set(int index, Object* value, WriteBarrierMode mode) {
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
}
-void FixedArray::NoWriteBarrierSet(FixedArray* array,
- int index,
- Object* value) {
- DCHECK_NE(array->map(), array->GetHeap()->fixed_cow_array_map());
- DCHECK_GE(index, 0);
- DCHECK_LT(index, array->length());
- DCHECK(!array->GetHeap()->InNewSpace(value));
- RELAXED_WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
-}
-
-void FixedArray::set_undefined(int index) {
- set_undefined(GetIsolate(), index);
-}
-
-void FixedArray::set_undefined(Isolate* isolate, int index) {
- FixedArray::NoWriteBarrierSet(this, index,
- isolate->heap()->undefined_value());
-}
-
-void FixedArray::set_null(int index) { set_null(GetIsolate(), index); }
-
-void FixedArray::set_null(Isolate* isolate, int index) {
- FixedArray::NoWriteBarrierSet(this, index, isolate->heap()->null_value());
-}
-
-void FixedArray::set_the_hole(int index) { set_the_hole(GetIsolate(), index); }
-
-void FixedArray::set_the_hole(Isolate* isolate, int index) {
- FixedArray::NoWriteBarrierSet(this, index, isolate->heap()->the_hole_value());
-}
-
-void FixedArray::FillWithHoles(int from, int to) {
- Isolate* isolate = GetIsolate();
- for (int i = from; i < to; i++) {
- set_the_hole(isolate, i);
- }
-}
-
-
-Object** FixedArray::data_start() {
- return HeapObject::RawField(this, kHeaderSize);
-}
-
Object** PropertyArray::data_start() {
return HeapObject::RawField(this, kHeaderSize);
}
-Object** FixedArray::RawFieldOfElementAt(int index) {
- return HeapObject::RawField(this, OffsetOfElementAt(index));
-}
-
ACCESSORS(EnumCache, keys, FixedArray, kKeysOffset)
ACCESSORS(EnumCache, indices, FixedArray, kIndicesOffset)
@@ -2215,54 +1918,6 @@ int DescriptorArray::SearchWithCache(Isolate* isolate, Name* name, Map* map) {
return number;
}
-PropertyDetails Map::GetLastDescriptorDetails() const {
- return instance_descriptors()->GetDetails(LastAdded());
-}
-
-int Map::LastAdded() const {
- int number_of_own_descriptors = NumberOfOwnDescriptors();
- DCHECK_GT(number_of_own_descriptors, 0);
- return number_of_own_descriptors - 1;
-}
-
-int Map::NumberOfOwnDescriptors() const {
- return NumberOfOwnDescriptorsBits::decode(bit_field3());
-}
-
-
-void Map::SetNumberOfOwnDescriptors(int number) {
- CHECK_LE(static_cast<unsigned>(number),
- static_cast<unsigned>(kMaxNumberOfDescriptors));
- set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number));
-}
-
-int Map::EnumLength() const { return EnumLengthBits::decode(bit_field3()); }
-
-void Map::SetEnumLength(int length) {
- if (length != kInvalidEnumCacheSentinel) {
- DCHECK_LE(length, NumberOfOwnDescriptors());
- CHECK_LE(static_cast<unsigned>(length),
- static_cast<unsigned>(kMaxNumberOfDescriptors));
- }
- set_bit_field3(EnumLengthBits::update(bit_field3(), length));
-}
-
-FixedArrayBase* Map::GetInitialElements() const {
- FixedArrayBase* result = nullptr;
- if (has_fast_elements() || has_fast_string_wrapper_elements()) {
- result = GetHeap()->empty_fixed_array();
- } else if (has_fast_sloppy_arguments_elements()) {
- result = GetHeap()->empty_sloppy_arguments_elements();
- } else if (has_fixed_typed_array_elements()) {
- result = GetHeap()->EmptyFixedTypedArrayForMap(this);
- } else if (has_dictionary_elements()) {
- result = GetHeap()->empty_slow_element_dictionary();
- } else {
- UNREACHABLE();
- }
- DCHECK(!GetHeap()->InNewSpace(result));
- return result;
-}
Object** DescriptorArray::GetKeySlot(int descriptor_number) {
DCHECK(descriptor_number < number_of_descriptors());
@@ -2389,101 +2044,6 @@ void DescriptorArray::SwapSortedKeys(int first, int second) {
SetSortedKey(second, first_key);
}
-int HashTableBase::NumberOfElements() const {
- return Smi::ToInt(get(kNumberOfElementsIndex));
-}
-
-int HashTableBase::NumberOfDeletedElements() const {
- return Smi::ToInt(get(kNumberOfDeletedElementsIndex));
-}
-
-int HashTableBase::Capacity() const { return Smi::ToInt(get(kCapacityIndex)); }
-
-void HashTableBase::ElementAdded() {
- SetNumberOfElements(NumberOfElements() + 1);
-}
-
-
-void HashTableBase::ElementRemoved() {
- SetNumberOfElements(NumberOfElements() - 1);
- SetNumberOfDeletedElements(NumberOfDeletedElements() + 1);
-}
-
-
-void HashTableBase::ElementsRemoved(int n) {
- SetNumberOfElements(NumberOfElements() - n);
- SetNumberOfDeletedElements(NumberOfDeletedElements() + n);
-}
-
-
-// static
-int HashTableBase::ComputeCapacity(int at_least_space_for) {
- // Add 50% slack to make slot collisions sufficiently unlikely.
- // See matching computation in HashTable::HasSufficientCapacityToAdd().
- // Must be kept in sync with CodeStubAssembler::HashTableComputeCapacity().
- int raw_cap = at_least_space_for + (at_least_space_for >> 1);
- int capacity = base::bits::RoundUpToPowerOfTwo32(raw_cap);
- return Max(capacity, kMinCapacity);
-}
-
-void HashTableBase::SetNumberOfElements(int nof) {
- set(kNumberOfElementsIndex, Smi::FromInt(nof));
-}
-
-
-void HashTableBase::SetNumberOfDeletedElements(int nod) {
- set(kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
-}
-
-template <typename Key>
-int BaseShape<Key>::GetMapRootIndex() {
- return Heap::kHashTableMapRootIndex;
-}
-
-template <typename Derived, typename Shape>
-int HashTable<Derived, Shape>::FindEntry(Key key) {
- return FindEntry(GetIsolate(), key);
-}
-
-template <typename Derived, typename Shape>
-int HashTable<Derived, Shape>::FindEntry(Isolate* isolate, Key key) {
- return FindEntry(isolate, key, Shape::Hash(isolate, key));
-}
-
-// Find entry for key otherwise return kNotFound.
-template <typename Derived, typename Shape>
-int HashTable<Derived, Shape>::FindEntry(Isolate* isolate, Key key,
- int32_t hash) {
- uint32_t capacity = Capacity();
- uint32_t entry = FirstProbe(hash, capacity);
- uint32_t count = 1;
- // EnsureCapacity will guarantee the hash table is never full.
- Object* undefined = isolate->heap()->undefined_value();
- Object* the_hole = isolate->heap()->the_hole_value();
- USE(the_hole);
- while (true) {
- Object* element = KeyAt(entry);
- // Empty entry. Uses raw unchecked accessors because it is called by the
- // string table during bootstrapping.
- if (element == undefined) break;
- if (!(Shape::kNeedsHoleCheck && the_hole == element)) {
- if (Shape::IsMatch(key, element)) return entry;
- }
- entry = NextProbe(entry, count++, capacity);
- }
- return kNotFound;
-}
-
-bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key, int32_t hash) {
- return FindEntry(isolate, key, hash) != kNotFound;
-}
-
-bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key) {
- Object* hash = key->GetHash();
- if (!hash->IsSmi()) return false;
- return FindEntry(isolate, key, Smi::ToInt(hash)) != kNotFound;
-}
-
bool StringSetShape::IsMatch(String* key, Object* value) {
DCHECK(value->IsString());
return key->Equals(String::cast(value));
@@ -2536,50 +2096,6 @@ void NumberDictionary::set_requires_slow_elements() {
set(kMaxNumberKeyIndex, Smi::FromInt(kRequiresSlowElementsMask));
}
-
-template <class T>
-PodArray<T>* PodArray<T>::cast(Object* object) {
- SLOW_DCHECK(object->IsByteArray());
- return reinterpret_cast<PodArray<T>*>(object);
-}
-template <class T>
-const PodArray<T>* PodArray<T>::cast(const Object* object) {
- SLOW_DCHECK(object->IsByteArray());
- return reinterpret_cast<const PodArray<T>*>(object);
-}
-
-// static
-template <class T>
-Handle<PodArray<T>> PodArray<T>::New(Isolate* isolate, int length,
- PretenureFlag pretenure) {
- return Handle<PodArray<T>>::cast(
- isolate->factory()->NewByteArray(length * sizeof(T), pretenure));
-}
-
-// static
-template <class Traits>
-STATIC_CONST_MEMBER_DEFINITION const InstanceType
- FixedTypedArray<Traits>::kInstanceType;
-
-
-template <class Traits>
-FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(Object* object) {
- SLOW_DCHECK(object->IsHeapObject() &&
- HeapObject::cast(object)->map()->instance_type() ==
- Traits::kInstanceType);
- return reinterpret_cast<FixedTypedArray<Traits>*>(object);
-}
-
-
-template <class Traits>
-const FixedTypedArray<Traits>*
-FixedTypedArray<Traits>::cast(const Object* object) {
- SLOW_DCHECK(object->IsHeapObject() &&
- HeapObject::cast(object)->map()->instance_type() ==
- Traits::kInstanceType);
- return reinterpret_cast<FixedTypedArray<Traits>*>(object);
-}
-
DEFINE_DEOPT_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
DEFINE_DEOPT_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
@@ -2593,23 +2109,6 @@ DEFINE_DEOPT_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(Pc, Smi)
-template <typename Derived, typename Shape>
-HashTable<Derived, Shape>* HashTable<Derived, Shape>::cast(Object* obj) {
- SLOW_DCHECK(obj->IsHashTable());
- return reinterpret_cast<HashTable*>(obj);
-}
-
-template <typename Derived, typename Shape>
-const HashTable<Derived, Shape>* HashTable<Derived, Shape>::cast(
- const Object* obj) {
- SLOW_DCHECK(obj->IsHashTable());
- return reinterpret_cast<const HashTable*>(obj);
-}
-
-
-SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
-SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
-
int PropertyArray::length() const {
Object* value_obj = READ_FIELD(this, kLengthAndHashOffset);
int value = Smi::ToInt(value_obj);
@@ -2672,394 +2171,6 @@ FreeSpace* FreeSpace::cast(HeapObject* o) {
return reinterpret_cast<FreeSpace*>(o);
}
-int ByteArray::Size() { return RoundUp(length() + kHeaderSize, kPointerSize); }
-
-byte ByteArray::get(int index) const {
- DCHECK(index >= 0 && index < this->length());
- return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
-}
-
-void ByteArray::set(int index, byte value) {
- DCHECK(index >= 0 && index < this->length());
- WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
-}
-
-void ByteArray::copy_in(int index, const byte* buffer, int length) {
- DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
- index + length <= this->length());
- byte* dst_addr = FIELD_ADDR(this, kHeaderSize + index * kCharSize);
- memcpy(dst_addr, buffer, length);
-}
-
-void ByteArray::copy_out(int index, byte* buffer, int length) {
- DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
- index + length <= this->length());
- const byte* src_addr = FIELD_ADDR(this, kHeaderSize + index * kCharSize);
- memcpy(buffer, src_addr, length);
-}
-
-int ByteArray::get_int(int index) const {
- DCHECK(index >= 0 && index < this->length() / kIntSize);
- return READ_INT_FIELD(this, kHeaderSize + index * kIntSize);
-}
-
-void ByteArray::set_int(int index, int value) {
- DCHECK(index >= 0 && index < this->length() / kIntSize);
- WRITE_INT_FIELD(this, kHeaderSize + index * kIntSize, value);
-}
-
-uint32_t ByteArray::get_uint32(int index) const {
- DCHECK(index >= 0 && index < this->length() / kUInt32Size);
- return READ_UINT32_FIELD(this, kHeaderSize + index * kUInt32Size);
-}
-
-void ByteArray::set_uint32(int index, uint32_t value) {
- DCHECK(index >= 0 && index < this->length() / kUInt32Size);
- WRITE_UINT32_FIELD(this, kHeaderSize + index * kUInt32Size, value);
-}
-
-void ByteArray::clear_padding() {
- int data_size = length() + kHeaderSize;
- memset(address() + data_size, 0, Size() - data_size);
-}
-
-ByteArray* ByteArray::FromDataStartAddress(Address address) {
- DCHECK_TAG_ALIGNED(address);
- return reinterpret_cast<ByteArray*>(address - kHeaderSize + kHeapObjectTag);
-}
-
-int ByteArray::DataSize() const { return RoundUp(length(), kPointerSize); }
-
-int ByteArray::ByteArraySize() { return SizeFor(this->length()); }
-
-Address ByteArray::GetDataStartAddress() {
- return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
-}
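
The removed ByteArray accessors all address one flat layout: a fixed header followed by length raw bytes, with the whole object rounded up to pointer alignment. A minimal standalone sketch of that size arithmetic follows; kHeaderSize and kPointerSize are assumed values for a 64-bit build, not taken from the V8 headers.

#include <cassert>

// Assumed constants for illustration only (64-bit build).
constexpr int kPointerSize = 8;
constexpr int kHeaderSize = 16;

constexpr int RoundUp(int value, int multiple) {
  return ((value + multiple - 1) / multiple) * multiple;
}

// Mirrors ByteArray::Size()/SizeFor(): header plus payload, pointer-aligned.
constexpr int ByteArraySizeFor(int length) {
  return RoundUp(kHeaderSize + length, kPointerSize);
}

int main() {
  assert(ByteArraySizeFor(0) == 16);  // header only
  assert(ByteArraySizeFor(1) == 24);  // 17 bytes rounded up to 24
  assert(ByteArraySizeFor(9) == 32);  // 25 bytes rounded up to 32
}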
-
-ACCESSORS(FixedTypedArrayBase, base_pointer, Object, kBasePointerOffset)
-
-
-void* FixedTypedArrayBase::external_pointer() const {
- intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
- return reinterpret_cast<void*>(ptr);
-}
-
-
-void FixedTypedArrayBase::set_external_pointer(void* value,
- WriteBarrierMode mode) {
- intptr_t ptr = reinterpret_cast<intptr_t>(value);
- WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr);
-}
-
-
-void* FixedTypedArrayBase::DataPtr() {
- return reinterpret_cast<void*>(
- reinterpret_cast<intptr_t>(base_pointer()) +
- reinterpret_cast<intptr_t>(external_pointer()));
-}
-
-
-int FixedTypedArrayBase::ElementSize(InstanceType type) {
- int element_size;
- switch (type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- element_size = size; \
- break;
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- default:
- UNREACHABLE();
- }
- return element_size;
-}
-
-int FixedTypedArrayBase::DataSize(InstanceType type) const {
- if (base_pointer() == Smi::kZero) return 0;
- return length() * ElementSize(type);
-}
-
-int FixedTypedArrayBase::DataSize() const {
- return DataSize(map()->instance_type());
-}
-
-size_t FixedTypedArrayBase::ByteLength() const {
- return static_cast<size_t>(length()) *
- static_cast<size_t>(ElementSize(map()->instance_type()));
-}
-
-int FixedTypedArrayBase::size() const {
- return OBJECT_POINTER_ALIGN(kDataOffset + DataSize());
-}
-
-int FixedTypedArrayBase::TypedArraySize(InstanceType type) const {
- return OBJECT_POINTER_ALIGN(kDataOffset + DataSize(type));
-}
-
-// static
-int FixedTypedArrayBase::TypedArraySize(InstanceType type, int length) {
- return OBJECT_POINTER_ALIGN(kDataOffset + length * ElementSize(type));
-}
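
FixedTypedArrayBase derives its object size from the element type: DataSize() is length times the per-kind element width, and TypedArraySize() pointer-aligns kDataOffset plus that payload. A standalone sketch of the arithmetic, with assumed constants (kDataOffset and the element widths below are illustrative; the real table is generated by the TYPED_ARRAYS macro):

#include <cassert>

constexpr int kPointerSize = 8;  // assumed 64-bit build
constexpr int kDataOffset = 24;  // hypothetical header size

constexpr int ObjectPointerAlign(int value) {
  return (value + kPointerSize - 1) & ~(kPointerSize - 1);
}

// Assumed element widths for three representative kinds.
enum class Kind { kUint8, kInt32, kFloat64 };
constexpr int ElementSize(Kind kind) {
  switch (kind) {
    case Kind::kUint8:   return 1;
    case Kind::kInt32:   return 4;
    case Kind::kFloat64: return 8;
  }
  return 0;
}

// Mirrors the static TypedArraySize(type, length) above.
constexpr int TypedArraySize(Kind kind, int length) {
  return ObjectPointerAlign(kDataOffset + length * ElementSize(kind));
}

int main() {
  assert(TypedArraySize(Kind::kUint8, 3) == 32);    // 24 + 3 -> 27 -> 32
  assert(TypedArraySize(Kind::kInt32, 4) == 40);    // 24 + 16 = 40
  assert(TypedArraySize(Kind::kFloat64, 2) == 40);  // 24 + 16 = 40
}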
-
-
-uint8_t Uint8ArrayTraits::defaultValue() { return 0; }
-
-
-uint8_t Uint8ClampedArrayTraits::defaultValue() { return 0; }
-
-
-int8_t Int8ArrayTraits::defaultValue() { return 0; }
-
-
-uint16_t Uint16ArrayTraits::defaultValue() { return 0; }
-
-
-int16_t Int16ArrayTraits::defaultValue() { return 0; }
-
-
-uint32_t Uint32ArrayTraits::defaultValue() { return 0; }
-
-
-int32_t Int32ArrayTraits::defaultValue() { return 0; }
-
-
-float Float32ArrayTraits::defaultValue() {
- return std::numeric_limits<float>::quiet_NaN();
-}
-
-
-double Float64ArrayTraits::defaultValue() {
- return std::numeric_limits<double>::quiet_NaN();
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::get_scalar(int index) {
- DCHECK((index >= 0) && (index < this->length()));
- // The JavaScript memory model allows for racy reads and writes to a
- // SharedArrayBuffer's backing store, which will always be a FixedTypedArray.
- // ThreadSanitizer will catch these racy accesses and warn about them, so we
- // disable TSAN for these reads and writes using annotations.
- //
- // We don't use relaxed atomics here, as it is not a requirement of the
- // JavaScript memory model to have tear-free reads of overlapping accesses,
- // and using relaxed atomics may introduce overhead.
- auto* ptr = reinterpret_cast<ElementType*>(DataPtr());
- TSAN_ANNOTATE_IGNORE_READS_BEGIN;
- auto result = ptr[index];
- TSAN_ANNOTATE_IGNORE_READS_END;
- return result;
-}
-
-
-template <class Traits>
-void FixedTypedArray<Traits>::set(int index, ElementType value) {
- CHECK((index >= 0) && (index < this->length()));
- // See the comment in FixedTypedArray<Traits>::get_scalar.
- auto* ptr = reinterpret_cast<ElementType*>(DataPtr());
- TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
- ptr[index] = value;
- TSAN_ANNOTATE_IGNORE_WRITES_END;
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::from(int value) {
- return static_cast<ElementType>(value);
-}
-
-template <>
-inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(int value) {
- if (value < 0) return 0;
- if (value > 0xFF) return 0xFF;
- return static_cast<uint8_t>(value);
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::from(uint32_t value) {
- return static_cast<ElementType>(value);
-}
-
-template <>
-inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(uint32_t value) {
- // We need this special case for Uint32 -> Uint8Clamped, because the highest
- // Uint32 values will be negative as an int, clamping to 0, rather than 255.
- if (value > 0xFF) return 0xFF;
- return static_cast<uint8_t>(value);
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::from(double value) {
- return static_cast<ElementType>(DoubleToInt32(value));
-}
-
-template <>
-inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(double value) {
- // Handle NaNs and values less than zero, both of which clamp to zero.
- if (!(value > 0)) return 0;
- if (value > 0xFF) return 0xFF;
- return static_cast<uint8_t>(lrint(value));
-}
-
-template <>
-inline float FixedTypedArray<Float32ArrayTraits>::from(double value) {
- return static_cast<float>(value);
-}
-
-template <>
-inline double FixedTypedArray<Float64ArrayTraits>::from(double value) {
- return value;
-}
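
The from() specializations above encode the typed-array conversion rules: most kinds truncate with a static_cast, while Uint8Clamped clamps to [0, 255], maps NaN and negatives to 0, and rounds with lrint. A standalone sketch of just the clamping rules (it mirrors the removed specializations but is not V8 code):

#include <cassert>
#include <cmath>
#include <cstdint>

// Mirrors FixedTypedArray<Uint8ClampedArrayTraits>::from(int).
uint8_t ClampedFromInt(int value) {
  if (value < 0) return 0;
  if (value > 0xFF) return 0xFF;
  return static_cast<uint8_t>(value);
}

// Mirrors the uint32_t overload: large values must not wrap to negative.
uint8_t ClampedFromUint32(uint32_t value) {
  return value > 0xFF ? 0xFF : static_cast<uint8_t>(value);
}

// Mirrors the double overload: NaN and negatives clamp to 0, values above
// 255 clamp to 255, everything else rounds with lrint.
uint8_t ClampedFromDouble(double value) {
  if (!(value > 0)) return 0;  // also catches NaN
  if (value > 0xFF) return 0xFF;
  return static_cast<uint8_t>(std::lrint(value));
}

int main() {
  assert(ClampedFromInt(-5) == 0);
  assert(ClampedFromInt(300) == 255);
  assert(ClampedFromUint32(0xFFFFFFFFu) == 255);
  assert(ClampedFromDouble(std::nan("")) == 0);
  assert(ClampedFromDouble(0.5) == 0);  // ties round to even by default
  assert(ClampedFromDouble(1.5) == 2);
}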
-
-template <class Traits>
-Handle<Object> FixedTypedArray<Traits>::get(FixedTypedArray<Traits>* array,
- int index) {
- return Traits::ToHandle(array->GetIsolate(), array->get_scalar(index));
-}
-
-
-template <class Traits>
-void FixedTypedArray<Traits>::SetValue(uint32_t index, Object* value) {
- ElementType cast_value = Traits::defaultValue();
- if (value->IsSmi()) {
- int int_value = Smi::ToInt(value);
- cast_value = from(int_value);
- } else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
- cast_value = from(double_value);
- } else {
- // Clamp undefined to the default value. All other types have been
- // converted to a number type further up in the call chain.
- DCHECK(value->IsUndefined(GetIsolate()));
- }
- set(index, cast_value);
-}
-
-
-Handle<Object> Uint8ArrayTraits::ToHandle(Isolate* isolate, uint8_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-
-Handle<Object> Uint8ClampedArrayTraits::ToHandle(Isolate* isolate,
- uint8_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-
-Handle<Object> Int8ArrayTraits::ToHandle(Isolate* isolate, int8_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-
-Handle<Object> Uint16ArrayTraits::ToHandle(Isolate* isolate, uint16_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-
-Handle<Object> Int16ArrayTraits::ToHandle(Isolate* isolate, int16_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-
-Handle<Object> Uint32ArrayTraits::ToHandle(Isolate* isolate, uint32_t scalar) {
- return isolate->factory()->NewNumberFromUint(scalar);
-}
-
-
-Handle<Object> Int32ArrayTraits::ToHandle(Isolate* isolate, int32_t scalar) {
- return isolate->factory()->NewNumberFromInt(scalar);
-}
-
-
-Handle<Object> Float32ArrayTraits::ToHandle(Isolate* isolate, float scalar) {
- return isolate->factory()->NewNumber(scalar);
-}
-
-
-Handle<Object> Float64ArrayTraits::ToHandle(Isolate* isolate, double scalar) {
- return isolate->factory()->NewNumber(scalar);
-}
-
-VisitorId Map::visitor_id() const {
- return static_cast<VisitorId>(READ_BYTE_FIELD(this, kVisitorIdOffset));
-}
-
-void Map::set_visitor_id(VisitorId id) {
- DCHECK_LE(0, id);
- DCHECK_LT(id, 256);
- WRITE_BYTE_FIELD(this, kVisitorIdOffset, static_cast<byte>(id));
-}
-
-int Map::instance_size_in_words() const {
- return RELAXED_READ_BYTE_FIELD(this, kInstanceSizeInWordsOffset);
-}
-
-void Map::set_instance_size_in_words(int value) {
- RELAXED_WRITE_BYTE_FIELD(this, kInstanceSizeInWordsOffset,
- static_cast<byte>(value));
-}
-
-int Map::instance_size() const {
- return instance_size_in_words() << kPointerSizeLog2;
-}
-
-void Map::set_instance_size(int value) {
- CHECK_EQ(0, value & (kPointerSize - 1));
- value >>= kPointerSizeLog2;
- CHECK_LT(static_cast<unsigned>(value), 256);
- set_instance_size_in_words(value);
-}
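
Map stores its instance size as a single byte counting pointer-sized words, which is why set_instance_size() checks pointer alignment and that the word count stays below 256. A tiny sketch of that packing, assuming a 64-bit build (kPointerSizeLog2 == 3):

#include <cassert>
#include <cstdint>

constexpr int kPointerSize = 8;      // assumed 64-bit build
constexpr int kPointerSizeLog2 = 3;

// Pack an instance size in bytes into the one-byte word count, and back.
uint8_t InstanceSizeInWords(int size_in_bytes) {
  assert((size_in_bytes & (kPointerSize - 1)) == 0);  // pointer aligned
  int words = size_in_bytes >> kPointerSizeLog2;
  assert(static_cast<unsigned>(words) < 256);         // must fit in a byte
  return static_cast<uint8_t>(words);
}

int InstanceSizeFromWords(uint8_t words) { return words << kPointerSizeLog2; }

int main() {
  uint8_t words = InstanceSizeInWords(56);  // a 7-word object
  assert(words == 7);
  assert(InstanceSizeFromWords(words) == 56);
  // The byte encoding caps instance sizes at 255 * 8 = 2040 bytes here.
}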
-
-int Map::inobject_properties_start_or_constructor_function_index() const {
- return RELAXED_READ_BYTE_FIELD(
- this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset);
-}
-
-void Map::set_inobject_properties_start_or_constructor_function_index(
- int value) {
- CHECK_LT(static_cast<unsigned>(value), 256);
- RELAXED_WRITE_BYTE_FIELD(
- this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
- static_cast<byte>(value));
-}
-
-int Map::GetInObjectPropertiesStartInWords() const {
- DCHECK(IsJSObjectMap());
- return inobject_properties_start_or_constructor_function_index();
-}
-
-void Map::SetInObjectPropertiesStartInWords(int value) {
- CHECK(IsJSObjectMap());
- set_inobject_properties_start_or_constructor_function_index(value);
-}
-
-int Map::GetInObjectProperties() const {
- DCHECK(IsJSObjectMap());
- return instance_size_in_words() - GetInObjectPropertiesStartInWords();
-}
-
-int Map::GetConstructorFunctionIndex() const {
- DCHECK(IsPrimitiveMap());
- return inobject_properties_start_or_constructor_function_index();
-}
-
-
-void Map::SetConstructorFunctionIndex(int value) {
- CHECK(IsPrimitiveMap());
- set_inobject_properties_start_or_constructor_function_index(value);
-}
-
-int Map::GetInObjectPropertyOffset(int index) const {
- return (GetInObjectPropertiesStartInWords() + index) * kPointerSize;
-}
-
-
-Handle<Map> Map::AddMissingTransitionsForTesting(
- Handle<Map> split_map, Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> full_layout_descriptor) {
- return AddMissingTransitions(split_map, descriptors, full_layout_descriptor);
-}
-
int HeapObject::SizeFromMap(Map* map) const {
int instance_size = map->instance_size();
if (instance_size != kVariableSizeSentinel) return instance_size;
@@ -3125,633 +2236,6 @@ int HeapObject::SizeFromMap(Map* map) const {
return reinterpret_cast<const Code*>(this)->CodeSize();
}
-InstanceType Map::instance_type() const {
- return static_cast<InstanceType>(
- READ_UINT16_FIELD(this, kInstanceTypeOffset));
-}
-
-
-void Map::set_instance_type(InstanceType value) {
- WRITE_UINT16_FIELD(this, kInstanceTypeOffset, value);
-}
-
-int Map::UnusedPropertyFields() const {
- int value = used_or_unused_instance_size_in_words();
- DCHECK_IMPLIES(!IsJSObjectMap(), value == 0);
- int unused;
- if (value >= JSObject::kFieldsAdded) {
- unused = instance_size_in_words() - value;
- } else {
- // For out-of-object properties, the "used_or_unused_instance_size_in_words"
- // byte encodes the slack in the property array.
- unused = value;
- }
- return unused;
-}
-
-int Map::used_or_unused_instance_size_in_words() const {
- return READ_BYTE_FIELD(this, kUsedOrUnusedInstanceSizeInWordsOffset);
-}
-
-void Map::set_used_or_unused_instance_size_in_words(int value) {
- CHECK_LE(static_cast<unsigned>(value), 255);
- WRITE_BYTE_FIELD(this, kUsedOrUnusedInstanceSizeInWordsOffset,
- static_cast<byte>(value));
-}
-
-int Map::UsedInstanceSize() const {
- int words = used_or_unused_instance_size_in_words();
- if (words < JSObject::kFieldsAdded) {
- // All in-object properties are used and the "words" value tracks the slack
- // in the property array.
- return instance_size();
- }
- return words * kPointerSize;
-}
-
-void Map::SetInObjectUnusedPropertyFields(int value) {
- STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kPointerSize);
- if (!IsJSObjectMap()) {
- CHECK_EQ(0, value);
- set_used_or_unused_instance_size_in_words(0);
- DCHECK_EQ(0, UnusedPropertyFields());
- return;
- }
- CHECK_LE(0, value);
- DCHECK_LE(value, GetInObjectProperties());
- int used_inobject_properties = GetInObjectProperties() - value;
- set_used_or_unused_instance_size_in_words(
- GetInObjectPropertyOffset(used_inobject_properties) / kPointerSize);
- DCHECK_EQ(value, UnusedPropertyFields());
-}
-
-void Map::SetOutOfObjectUnusedPropertyFields(int value) {
- STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kPointerSize);
- CHECK_LT(static_cast<unsigned>(value), JSObject::kFieldsAdded);
- // For out-of-object properties, the "used_or_unused_instance_size_in_words"
- // byte encodes the slack in the property array.
- set_used_or_unused_instance_size_in_words(value);
- DCHECK_EQ(value, UnusedPropertyFields());
-}
-
-void Map::CopyUnusedPropertyFields(Map* map) {
- set_used_or_unused_instance_size_in_words(
- map->used_or_unused_instance_size_in_words());
- DCHECK_EQ(UnusedPropertyFields(), map->UnusedPropertyFields());
-}
-
-void Map::AccountAddedPropertyField() {
- // Update used instance size and unused property fields number.
- STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kPointerSize);
-#ifdef DEBUG
- int new_unused = UnusedPropertyFields() - 1;
- if (new_unused < 0) new_unused += JSObject::kFieldsAdded;
-#endif
- int value = used_or_unused_instance_size_in_words();
- if (value >= JSObject::kFieldsAdded) {
- if (value == instance_size_in_words()) {
- AccountAddedOutOfObjectPropertyField(0);
- } else {
- // The property is added in-object, so simply increment the counter.
- set_used_or_unused_instance_size_in_words(value + 1);
- }
- } else {
- AccountAddedOutOfObjectPropertyField(value);
- }
- DCHECK_EQ(new_unused, UnusedPropertyFields());
-}
-
-void Map::AccountAddedOutOfObjectPropertyField(int unused_in_property_array) {
- unused_in_property_array--;
- if (unused_in_property_array < 0) {
- unused_in_property_array += JSObject::kFieldsAdded;
- }
- CHECK_LT(static_cast<unsigned>(unused_in_property_array),
- JSObject::kFieldsAdded);
- set_used_or_unused_instance_size_in_words(unused_in_property_array);
- DCHECK_EQ(unused_in_property_array, UnusedPropertyFields());
-}
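
The used_or_unused_instance_size_in_words byte above is overloaded: a value of at least JSObject::kFieldsAdded records the used instance size in words (so in-object slack is instance_size_in_words minus it), while a smaller value records the slack left in the out-of-object property array. A standalone sketch of the decode step; kFieldsAdded is assumed here to be 3, i.e. JSObject::kHeaderSize / kPointerSize on a 64-bit build:

#include <cassert>

// Assumed: kFieldsAdded == JSObject::kHeaderSize / kPointerSize == 3
// (map, properties, elements) on a 64-bit build.
constexpr int kFieldsAdded = 3;

// Mirrors Map::UnusedPropertyFields(): one byte encodes either the used
// instance size in words (>= kFieldsAdded) or the property-array slack.
int UnusedPropertyFields(int instance_size_in_words, int used_or_unused) {
  if (used_or_unused >= kFieldsAdded) {
    return instance_size_in_words - used_or_unused;  // in-object slack
  }
  return used_or_unused;  // out-of-object slack in the property array
}

int main() {
  // A 10-word object with 8 words in use has 2 unused in-object fields.
  assert(UnusedPropertyFields(10, 8) == 2);
  // A value below kFieldsAdded is itself the property-array slack.
  assert(UnusedPropertyFields(10, 2) == 2);
}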
-
-byte Map::bit_field() const { return READ_BYTE_FIELD(this, kBitFieldOffset); }
-
-
-void Map::set_bit_field(byte value) {
- WRITE_BYTE_FIELD(this, kBitFieldOffset, value);
-}
-
-
-byte Map::bit_field2() const { return READ_BYTE_FIELD(this, kBitField2Offset); }
-
-
-void Map::set_bit_field2(byte value) {
- WRITE_BYTE_FIELD(this, kBitField2Offset, value);
-}
-
-
-void Map::set_non_instance_prototype(bool value) {
- if (value) {
- set_bit_field(bit_field() | (1 << kHasNonInstancePrototype));
- } else {
- set_bit_field(bit_field() & ~(1 << kHasNonInstancePrototype));
- }
-}
-
-bool Map::has_non_instance_prototype() const {
- if (!has_prototype_slot()) return false;
- return ((1 << kHasNonInstancePrototype) & bit_field()) != 0;
-}
-
-
-void Map::set_is_constructor(bool value) {
- if (value) {
- set_bit_field(bit_field() | (1 << kIsConstructor));
- } else {
- set_bit_field(bit_field() & ~(1 << kIsConstructor));
- }
-}
-
-
-bool Map::is_constructor() const {
- return ((1 << kIsConstructor) & bit_field()) != 0;
-}
-
-BOOL_ACCESSORS(Map, bit_field, has_prototype_slot, kHasPrototypeSlot)
-
-void Map::set_has_hidden_prototype(bool value) {
- set_bit_field3(HasHiddenPrototype::update(bit_field3(), value));
-}
-
-bool Map::has_hidden_prototype() const {
- return HasHiddenPrototype::decode(bit_field3());
-}
-
-
-void Map::set_has_indexed_interceptor() {
- set_bit_field(bit_field() | (1 << kHasIndexedInterceptor));
-}
-
-bool Map::has_indexed_interceptor() const {
- return ((1 << kHasIndexedInterceptor) & bit_field()) != 0;
-}
-
-
-void Map::set_is_undetectable() {
- set_bit_field(bit_field() | (1 << kIsUndetectable));
-}
-
-bool Map::is_undetectable() const {
- return ((1 << kIsUndetectable) & bit_field()) != 0;
-}
-
-
-void Map::set_has_named_interceptor() {
- set_bit_field(bit_field() | (1 << kHasNamedInterceptor));
-}
-
-bool Map::has_named_interceptor() const {
- return ((1 << kHasNamedInterceptor) & bit_field()) != 0;
-}
-
-
-void Map::set_is_access_check_needed(bool access_check_needed) {
- if (access_check_needed) {
- set_bit_field(bit_field() | (1 << kIsAccessCheckNeeded));
- } else {
- set_bit_field(bit_field() & ~(1 << kIsAccessCheckNeeded));
- }
-}
-
-bool Map::is_access_check_needed() const {
- return ((1 << kIsAccessCheckNeeded) & bit_field()) != 0;
-}
-
-
-void Map::set_is_extensible(bool value) {
- if (value) {
- set_bit_field2(bit_field2() | (1 << kIsExtensible));
- } else {
- set_bit_field2(bit_field2() & ~(1 << kIsExtensible));
- }
-}
-
-bool Map::is_extensible() const {
- return ((1 << kIsExtensible) & bit_field2()) != 0;
-}
-
-
-void Map::set_is_prototype_map(bool value) {
- set_bit_field2(IsPrototypeMapBits::update(bit_field2(), value));
-}
-
-bool Map::is_prototype_map() const {
- return IsPrototypeMapBits::decode(bit_field2());
-}
-
-bool Map::is_abandoned_prototype_map() const {
- return is_prototype_map() && !owns_descriptors();
-}
-
-bool Map::should_be_fast_prototype_map() const {
- if (!prototype_info()->IsPrototypeInfo()) return false;
- return PrototypeInfo::cast(prototype_info())->should_be_fast_map();
-}
-
-void Map::set_elements_kind(ElementsKind elements_kind) {
- CHECK_LT(static_cast<int>(elements_kind), kElementsKindCount);
- DCHECK_LE(kElementsKindCount, 1 << Map::ElementsKindBits::kSize);
- set_bit_field2(Map::ElementsKindBits::update(bit_field2(), elements_kind));
- DCHECK(this->elements_kind() == elements_kind);
-}
-
-ElementsKind Map::elements_kind() const {
- return Map::ElementsKindBits::decode(bit_field2());
-}
-
-bool Map::has_fast_smi_elements() const {
- return IsSmiElementsKind(elements_kind());
-}
-
-bool Map::has_fast_object_elements() const {
- return IsObjectElementsKind(elements_kind());
-}
-
-bool Map::has_fast_smi_or_object_elements() const {
- return IsSmiOrObjectElementsKind(elements_kind());
-}
-
-bool Map::has_fast_double_elements() const {
- return IsDoubleElementsKind(elements_kind());
-}
-
-bool Map::has_fast_elements() const {
- return IsFastElementsKind(elements_kind());
-}
-
-bool Map::has_sloppy_arguments_elements() const {
- return IsSloppyArgumentsElementsKind(elements_kind());
-}
-
-bool Map::has_fast_sloppy_arguments_elements() const {
- return elements_kind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
-}
-
-bool Map::has_fast_string_wrapper_elements() const {
- return elements_kind() == FAST_STRING_WRAPPER_ELEMENTS;
-}
-
-bool Map::has_fixed_typed_array_elements() const {
- return IsFixedTypedArrayElementsKind(elements_kind());
-}
-
-bool Map::has_dictionary_elements() const {
- return IsDictionaryElementsKind(elements_kind());
-}
-
-
-void Map::set_dictionary_map(bool value) {
- uint32_t new_bit_field3 = DictionaryMap::update(bit_field3(), value);
- new_bit_field3 = IsUnstable::update(new_bit_field3, value);
- set_bit_field3(new_bit_field3);
-}
-
-bool Map::is_dictionary_map() const {
- return DictionaryMap::decode(bit_field3());
-}
-
-void Map::set_owns_descriptors(bool owns_descriptors) {
- set_bit_field3(OwnsDescriptors::update(bit_field3(), owns_descriptors));
-}
-
-bool Map::owns_descriptors() const {
- return OwnsDescriptors::decode(bit_field3());
-}
-
-
-void Map::set_is_callable() { set_bit_field(bit_field() | (1 << kIsCallable)); }
-
-
-bool Map::is_callable() const {
- return ((1 << kIsCallable) & bit_field()) != 0;
-}
-
-
-void Map::deprecate() {
- set_bit_field3(Deprecated::update(bit_field3(), true));
- if (FLAG_trace_maps) {
- LOG(GetIsolate(), MapEvent("Deprecate", this, nullptr));
- }
-}
-
-bool Map::is_deprecated() const { return Deprecated::decode(bit_field3()); }
-
-void Map::set_migration_target(bool value) {
- set_bit_field3(IsMigrationTarget::update(bit_field3(), value));
-}
-
-bool Map::is_migration_target() const {
- return IsMigrationTarget::decode(bit_field3());
-}
-
-void Map::set_immutable_proto(bool value) {
- set_bit_field3(ImmutablePrototype::update(bit_field3(), value));
-}
-
-bool Map::is_immutable_proto() const {
- return ImmutablePrototype::decode(bit_field3());
-}
-
-void Map::set_new_target_is_base(bool value) {
- set_bit_field3(NewTargetIsBase::update(bit_field3(), value));
-}
-
-bool Map::new_target_is_base() const {
- return NewTargetIsBase::decode(bit_field3());
-}
-
-void Map::set_may_have_interesting_symbols(bool value) {
- set_bit_field3(MayHaveInterestingSymbols::update(bit_field3(), value));
-}
-
-bool Map::may_have_interesting_symbols() const {
- return MayHaveInterestingSymbols::decode(bit_field3());
-}
-
-void Map::set_construction_counter(int value) {
- set_bit_field3(ConstructionCounter::update(bit_field3(), value));
-}
-
-int Map::construction_counter() const {
- return ConstructionCounter::decode(bit_field3());
-}
-
-
-void Map::mark_unstable() {
- set_bit_field3(IsUnstable::update(bit_field3(), true));
-}
-
-bool Map::is_stable() const { return !IsUnstable::decode(bit_field3()); }
-
-bool Map::CanBeDeprecated() const {
- int descriptor = LastAdded();
- for (int i = 0; i <= descriptor; i++) {
- PropertyDetails details = instance_descriptors()->GetDetails(i);
- if (details.representation().IsNone()) return true;
- if (details.representation().IsSmi()) return true;
- if (details.representation().IsDouble()) return true;
- if (details.representation().IsHeapObject()) return true;
- if (details.kind() == kData && details.location() == kDescriptor) {
- return true;
- }
- }
- return false;
-}
-
-
-void Map::NotifyLeafMapLayoutChange() {
- if (is_stable()) {
- mark_unstable();
- dependent_code()->DeoptimizeDependentCodeGroup(
- GetIsolate(),
- DependentCode::kPrototypeCheckGroup);
- }
-}
-
-bool Map::CanTransition() const {
- // Only JSObject and subtypes have map transitions and back pointers.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
- return instance_type() >= FIRST_JS_OBJECT_TYPE;
-}
-
-bool Map::IsBooleanMap() const { return this == GetHeap()->boolean_map(); }
-bool Map::IsPrimitiveMap() const {
- STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
- return instance_type() <= LAST_PRIMITIVE_TYPE;
-}
-bool Map::IsJSReceiverMap() const {
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- return instance_type() >= FIRST_JS_RECEIVER_TYPE;
-}
-bool Map::IsJSObjectMap() const {
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- return instance_type() >= FIRST_JS_OBJECT_TYPE;
-}
-bool Map::IsJSArrayMap() const { return instance_type() == JS_ARRAY_TYPE; }
-bool Map::IsJSFunctionMap() const {
- return instance_type() == JS_FUNCTION_TYPE;
-}
-bool Map::IsStringMap() const { return instance_type() < FIRST_NONSTRING_TYPE; }
-bool Map::IsJSProxyMap() const { return instance_type() == JS_PROXY_TYPE; }
-bool Map::IsJSGlobalProxyMap() const {
- return instance_type() == JS_GLOBAL_PROXY_TYPE;
-}
-bool Map::IsJSGlobalObjectMap() const {
- return instance_type() == JS_GLOBAL_OBJECT_TYPE;
-}
-bool Map::IsJSTypedArrayMap() const {
- return instance_type() == JS_TYPED_ARRAY_TYPE;
-}
-bool Map::IsJSDataViewMap() const {
- return instance_type() == JS_DATA_VIEW_TYPE;
-}
-
-bool Map::IsSpecialReceiverMap() const {
- bool result = IsSpecialReceiverInstanceType(instance_type());
- DCHECK_IMPLIES(!result,
- !has_named_interceptor() && !is_access_check_needed());
- return result;
-}
-
-Object* Map::prototype() const {
- return READ_FIELD(this, kPrototypeOffset);
-}
-
-
-void Map::set_prototype(Object* value, WriteBarrierMode mode) {
- DCHECK(value->IsNull(GetIsolate()) || value->IsJSReceiver());
- WRITE_FIELD(this, kPrototypeOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode);
-}
-
-LayoutDescriptor* Map::layout_descriptor_gc_safe() const {
- DCHECK(FLAG_unbox_double_fields);
- Object* layout_desc = RELAXED_READ_FIELD(this, kLayoutDescriptorOffset);
- return LayoutDescriptor::cast_gc_safe(layout_desc);
-}
-
-
-bool Map::HasFastPointerLayout() const {
- DCHECK(FLAG_unbox_double_fields);
- Object* layout_desc = RELAXED_READ_FIELD(this, kLayoutDescriptorOffset);
- return LayoutDescriptor::IsFastPointerLayout(layout_desc);
-}
-
-
-void Map::UpdateDescriptors(DescriptorArray* descriptors,
- LayoutDescriptor* layout_desc) {
- set_instance_descriptors(descriptors);
- if (FLAG_unbox_double_fields) {
- if (layout_descriptor()->IsSlowLayout()) {
- set_layout_descriptor(layout_desc);
- }
-#ifdef VERIFY_HEAP
- // TODO(ishell): remove these checks from VERIFY_HEAP mode.
- if (FLAG_verify_heap) {
- CHECK(layout_descriptor()->IsConsistentWithMap(this));
- CHECK_EQ(Map::GetVisitorId(this), visitor_id());
- }
-#else
- SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(this));
- DCHECK(visitor_id() == Map::GetVisitorId(this));
-#endif
- }
-}
-
-
-void Map::InitializeDescriptors(DescriptorArray* descriptors,
- LayoutDescriptor* layout_desc) {
- int len = descriptors->number_of_descriptors();
- set_instance_descriptors(descriptors);
- SetNumberOfOwnDescriptors(len);
-
- if (FLAG_unbox_double_fields) {
- set_layout_descriptor(layout_desc);
-#ifdef VERIFY_HEAP
- // TODO(ishell): remove these checks from VERIFY_HEAP mode.
- if (FLAG_verify_heap) {
- CHECK(layout_descriptor()->IsConsistentWithMap(this));
- }
-#else
- SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(this));
-#endif
- set_visitor_id(Map::GetVisitorId(this));
- }
-}
-
-
-ACCESSORS(Map, instance_descriptors, DescriptorArray, kDescriptorsOffset)
-ACCESSORS_CHECKED(Map, layout_descriptor, LayoutDescriptor,
- kLayoutDescriptorOffset, FLAG_unbox_double_fields)
-
-void Map::set_bit_field3(uint32_t bits) {
- if (kInt32Size != kPointerSize) {
- WRITE_UINT32_FIELD(this, kBitField3Offset + kInt32Size, 0);
- }
- WRITE_UINT32_FIELD(this, kBitField3Offset, bits);
-}
-
-
-uint32_t Map::bit_field3() const {
- return READ_UINT32_FIELD(this, kBitField3Offset);
-}
-
-LayoutDescriptor* Map::GetLayoutDescriptor() const {
- return FLAG_unbox_double_fields ? layout_descriptor()
- : LayoutDescriptor::FastPointerLayout();
-}
-
-
-void Map::AppendDescriptor(Descriptor* desc) {
- DescriptorArray* descriptors = instance_descriptors();
- int number_of_own_descriptors = NumberOfOwnDescriptors();
- DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
- descriptors->Append(desc);
- SetNumberOfOwnDescriptors(number_of_own_descriptors + 1);
-
- // Properly mark the map if the {desc} is an "interesting symbol".
- if (desc->GetKey()->IsInterestingSymbol()) {
- set_may_have_interesting_symbols(true);
- }
- PropertyDetails details = desc->GetDetails();
- if (details.location() == kField) {
- DCHECK_GT(UnusedPropertyFields(), 0);
- AccountAddedPropertyField();
- }
-
-// This function does not support appending double field descriptors and
-// it should never try to (otherwise, layout descriptor must be updated too).
-#ifdef DEBUG
- DCHECK(details.location() != kField || !details.representation().IsDouble());
-#endif
-}
-
-Object* Map::GetBackPointer() const {
- Object* object = constructor_or_backpointer();
- if (object->IsMap()) {
- return object;
- }
- return GetIsolate()->heap()->undefined_value();
-}
-
-Map* Map::ElementsTransitionMap() {
- DisallowHeapAllocation no_gc;
- return TransitionsAccessor(this, &no_gc)
- .SearchSpecial(GetHeap()->elements_transition_symbol());
-}
-
-
-ACCESSORS(Map, raw_transitions, Object, kTransitionsOrPrototypeInfoOffset)
-
-
-Object* Map::prototype_info() const {
- DCHECK(is_prototype_map());
- return READ_FIELD(this, Map::kTransitionsOrPrototypeInfoOffset);
-}
-
-
-void Map::set_prototype_info(Object* value, WriteBarrierMode mode) {
- CHECK(is_prototype_map());
- WRITE_FIELD(this, Map::kTransitionsOrPrototypeInfoOffset, value);
- CONDITIONAL_WRITE_BARRIER(
- GetHeap(), this, Map::kTransitionsOrPrototypeInfoOffset, value, mode);
-}
-
-
-void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
- CHECK_GE(instance_type(), FIRST_JS_RECEIVER_TYPE);
- CHECK(value->IsMap());
- CHECK(GetBackPointer()->IsUndefined(GetIsolate()));
- CHECK_IMPLIES(value->IsMap(), Map::cast(value)->GetConstructor() ==
- constructor_or_backpointer());
- set_constructor_or_backpointer(value, mode);
-}
-
-ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
-ACCESSORS(Map, weak_cell_cache, Object, kWeakCellCacheOffset)
-ACCESSORS(Map, constructor_or_backpointer, Object,
- kConstructorOrBackPointerOffset)
-
-Object* Map::GetConstructor() const {
- Object* maybe_constructor = constructor_or_backpointer();
- // Follow any back pointers.
- while (maybe_constructor->IsMap()) {
- maybe_constructor =
- Map::cast(maybe_constructor)->constructor_or_backpointer();
- }
- return maybe_constructor;
-}
-
-FunctionTemplateInfo* Map::GetFunctionTemplateInfo() const {
- Object* constructor = GetConstructor();
- if (constructor->IsJSFunction()) {
- DCHECK(JSFunction::cast(constructor)->shared()->IsApiFunction());
- return JSFunction::cast(constructor)->shared()->get_api_func_data();
- }
- DCHECK(constructor->IsFunctionTemplateInfo());
- return FunctionTemplateInfo::cast(constructor);
-}
-
-void Map::SetConstructor(Object* constructor, WriteBarrierMode mode) {
- // Never overwrite a back pointer with a constructor.
- CHECK(!constructor_or_backpointer()->IsMap());
- set_constructor_or_backpointer(constructor, mode);
-}
-
-
-Handle<Map> Map::CopyInitialMap(Handle<Map> map) {
- return CopyInitialMap(map, map->instance_size(), map->GetInObjectProperties(),
- map->UnusedPropertyFields());
-}
-
Object* JSBoundFunction::raw_bound_target_function() const {
return READ_FIELD(this, kBoundTargetFunctionOffset);
}
@@ -3774,11 +2258,20 @@ SMI_ACCESSORS(AccessorInfo, flags, kFlagsOffset)
ACCESSORS(AccessorInfo, expected_receiver_type, Object,
kExpectedReceiverTypeOffset)
-ACCESSORS(AccessorInfo, getter, Object, kGetterOffset)
-ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
+ACCESSORS_CHECKED2(AccessorInfo, getter, Object, kGetterOffset, true,
+ Foreign::IsNormalized(value))
+ACCESSORS_CHECKED2(AccessorInfo, setter, Object, kSetterOffset, true,
+ Foreign::IsNormalized(value));
ACCESSORS(AccessorInfo, js_getter, Object, kJsGetterOffset)
ACCESSORS(AccessorInfo, data, Object, kDataOffset)
+bool AccessorInfo::has_getter() {
+ bool result = getter() != Smi::kZero;
+ DCHECK_EQ(result, getter() != Smi::kZero &&
+ Foreign::cast(getter())->foreign_address() != nullptr);
+ return result;
+}
+
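
The new has_getter() relies on the normalization invariant that Foreign::IsNormalized() (added further down) spells out: an AccessorInfo callback slot holds either Smi::kZero or a Foreign whose address is non-null, so comparing against Smi::kZero alone is enough. A small sketch of that invariant with a stand-in Slot type (the struct and helpers are illustrative, not V8 types):

#include <cassert>

// Stand-in for "Smi::kZero or a Foreign with a non-null address".
struct Slot {
  bool is_zero_smi;
  const void* foreign_address;  // meaningful only when !is_zero_smi
};

bool IsNormalized(const Slot& slot) {
  return slot.is_zero_smi || slot.foreign_address != nullptr;
}

// Mirrors AccessorInfo::has_getter(): given the invariant, testing against
// the zero Smi is sufficient to know whether a native getter is installed.
bool HasGetter(const Slot& slot) {
  assert(IsNormalized(slot));
  return !slot.is_zero_smi;
}

int main() {
  int dummy = 0;
  Slot none{true, nullptr};
  Slot some{false, &dummy};
  assert(!HasGetter(none));
  assert(HasGetter(some));
}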
ACCESSORS(PromiseResolveThenableJobInfo, thenable, JSReceiver, kThenableOffset)
ACCESSORS(PromiseResolveThenableJobInfo, then, JSReceiver, kThenOffset)
ACCESSORS(PromiseResolveThenableJobInfo, resolve, JSFunction, kResolveOffset)
@@ -3890,6 +2383,7 @@ BOOL_ACCESSORS(InterceptorInfo, flags, can_intercept_symbols,
kCanInterceptSymbolsBit)
BOOL_ACCESSORS(InterceptorInfo, flags, all_can_read, kAllCanReadBit)
BOOL_ACCESSORS(InterceptorInfo, flags, non_masking, kNonMasking)
+BOOL_ACCESSORS(InterceptorInfo, flags, is_named, kNamed)
ACCESSORS(CallHandlerInfo, callback, Object, kCallbackOffset)
ACCESSORS(CallHandlerInfo, js_callback, Object, kJsCallbackOffset)
@@ -3952,18 +2446,6 @@ void ObjectTemplateInfo::set_immutable_proto(bool immutable) {
IsImmutablePrototype::update(Smi::ToInt(data()), immutable)));
}
-int TemplateList::length() const {
- return Smi::ToInt(FixedArray::cast(this)->get(kLengthIndex));
-}
-
-Object* TemplateList::get(int index) const {
- return FixedArray::cast(this)->get(kFirstElementIndex + index);
-}
-
-void TemplateList::set(int index, Object* value) {
- FixedArray::cast(this)->set(kFirstElementIndex + index, value);
-}
-
ACCESSORS(AllocationSite, transition_info_or_boilerplate, Object,
kTransitionInfoOrBoilerplateOffset)
@@ -4056,8 +2538,7 @@ bool JSFunction::HasOptimizationMarker() {
void JSFunction::ClearOptimizationMarker() {
DCHECK(has_feedback_vector());
- DCHECK(!feedback_vector()->has_optimized_code());
- feedback_vector()->SetOptimizationMarker(OptimizationMarker::kNone);
+ feedback_vector()->ClearOptimizationMarker();
}
bool JSFunction::IsInterpreted() {
@@ -4094,22 +2575,6 @@ void JSFunction::CompleteInobjectSlackTrackingIfActive() {
}
}
-bool Map::IsInobjectSlackTrackingInProgress() const {
- return construction_counter() != Map::kNoSlackTracking;
-}
-
-
-void Map::InobjectSlackTrackingStep() {
- // Slack tracking should only be performed on an initial map.
- DCHECK(GetBackPointer()->IsUndefined(GetIsolate()));
- if (!IsInobjectSlackTrackingInProgress()) return;
- int counter = construction_counter();
- set_construction_counter(counter - 1);
- if (counter == kSlackTrackingCounterEnd) {
- CompleteInobjectSlackTracking();
- }
-}
-
AbstractCode* JSFunction::abstract_code() {
if (IsInterpreted()) {
return AbstractCode::cast(shared()->bytecode_array());
@@ -4253,24 +2718,21 @@ bool JSFunction::is_compiled() {
return code() != builtins->builtin(Builtins::kCompileLazy);
}
-ACCESSORS(JSProxy, target, JSReceiver, kTargetOffset)
+ACCESSORS(JSProxy, target, Object, kTargetOffset)
ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
bool JSProxy::IsRevoked() const { return !handler()->IsJSReceiver(); }
-ACCESSORS(JSCollection, table, Object, kTableOffset)
-ACCESSORS(JSCollectionIterator, table, Object, kTableOffset)
-ACCESSORS(JSCollectionIterator, index, Object, kIndexOffset)
-
-ACCESSORS(JSWeakCollection, table, Object, kTableOffset)
-ACCESSORS(JSWeakCollection, next, Object, kNextOffset)
-
+// static
+bool Foreign::IsNormalized(Object* value) {
+ if (value == Smi::kZero) return true;
+ return Foreign::cast(value)->foreign_address() != nullptr;
+}
Address Foreign::foreign_address() {
return AddressFrom<Address>(READ_INTPTR_FIELD(this, kForeignAddressOffset));
}
-
void Foreign::set_foreign_address(Address value) {
WRITE_INTPTR_FIELD(this, kForeignAddressOffset, OffsetFrom(value));
}
@@ -4966,110 +3428,6 @@ int WeakHashTableShape::GetMapRootIndex() {
return Heap::kWeakHashTableMapRootIndex;
}
-int Map::SlackForArraySize(int old_size, int size_limit) {
- const int max_slack = size_limit - old_size;
- CHECK_LE(0, max_slack);
- if (old_size < 4) {
- DCHECK_LE(1, max_slack);
- return 1;
- }
- return Min(max_slack, old_size / 4);
-}
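
SlackForArraySize() grows descriptor-backing arrays by a quarter of the old size, capped by the room left up to size_limit, and guarantees at least one extra slot for very small arrays. A standalone copy of the logic with a few worked cases:

#include <algorithm>
#include <cassert>

// Mirrors the removed Map::SlackForArraySize().
int SlackForArraySize(int old_size, int size_limit) {
  const int max_slack = size_limit - old_size;
  assert(max_slack >= 0);
  if (old_size < 4) {
    assert(max_slack >= 1);
    return 1;
  }
  return std::min(max_slack, old_size / 4);
}

int main() {
  assert(SlackForArraySize(2, 1020) == 1);   // tiny arrays grow by one slot
  assert(SlackForArraySize(16, 1020) == 4);  // otherwise by 25%
  assert(SlackForArraySize(16, 18) == 2);    // but never past the limit
}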
-
-int TypeFeedbackInfo::ic_total_count() {
- int current = Smi::ToInt(READ_FIELD(this, kStorage1Offset));
- return ICTotalCountField::decode(current);
-}
-
-
-void TypeFeedbackInfo::set_ic_total_count(int count) {
- int value = Smi::ToInt(READ_FIELD(this, kStorage1Offset));
- value = ICTotalCountField::update(value,
- ICTotalCountField::decode(count));
- WRITE_FIELD(this, kStorage1Offset, Smi::FromInt(value));
-}
-
-
-int TypeFeedbackInfo::ic_with_type_info_count() {
- int current = Smi::ToInt(READ_FIELD(this, kStorage2Offset));
- return ICsWithTypeInfoCountField::decode(current);
-}
-
-
-void TypeFeedbackInfo::change_ic_with_type_info_count(int delta) {
- if (delta == 0) return;
- int value = Smi::ToInt(READ_FIELD(this, kStorage2Offset));
- int new_count = ICsWithTypeInfoCountField::decode(value) + delta;
- // We can get a negative count here when the type-feedback info is
- // shared between two code objects. This can only happen when
- // the debugger made a shallow copy of a code object (see Heap::CopyCode).
- // Since we do not optimize when the debugger is active, we can skip
- // this counter update.
- if (new_count >= 0) {
- new_count &= ICsWithTypeInfoCountField::kMask;
- value = ICsWithTypeInfoCountField::update(value, new_count);
- WRITE_FIELD(this, kStorage2Offset, Smi::FromInt(value));
- }
-}
-
-
-int TypeFeedbackInfo::ic_generic_count() {
- return Smi::ToInt(READ_FIELD(this, kStorage3Offset));
-}
-
-
-void TypeFeedbackInfo::change_ic_generic_count(int delta) {
- if (delta == 0) return;
- int new_count = ic_generic_count() + delta;
- if (new_count >= 0) {
- new_count &= ~Smi::kMinValue;
- WRITE_FIELD(this, kStorage3Offset, Smi::FromInt(new_count));
- }
-}
-
-
-void TypeFeedbackInfo::initialize_storage() {
- WRITE_FIELD(this, kStorage1Offset, Smi::kZero);
- WRITE_FIELD(this, kStorage2Offset, Smi::kZero);
- WRITE_FIELD(this, kStorage3Offset, Smi::kZero);
-}
-
-
-void TypeFeedbackInfo::change_own_type_change_checksum() {
- int value = Smi::ToInt(READ_FIELD(this, kStorage1Offset));
- int checksum = OwnTypeChangeChecksum::decode(value);
- checksum = (checksum + 1) % (1 << kTypeChangeChecksumBits);
- value = OwnTypeChangeChecksum::update(value, checksum);
- // Ensure packed bit field is in Smi range.
- if (value > Smi::kMaxValue) value |= Smi::kMinValue;
- if (value < Smi::kMinValue) value &= ~Smi::kMinValue;
- WRITE_FIELD(this, kStorage1Offset, Smi::FromInt(value));
-}
-
-
-void TypeFeedbackInfo::set_inlined_type_change_checksum(int checksum) {
- int value = Smi::ToInt(READ_FIELD(this, kStorage2Offset));
- int mask = (1 << kTypeChangeChecksumBits) - 1;
- value = InlinedTypeChangeChecksum::update(value, checksum & mask);
- // Ensure packed bit field is in Smi range.
- if (value > Smi::kMaxValue) value |= Smi::kMinValue;
- if (value < Smi::kMinValue) value &= ~Smi::kMinValue;
- WRITE_FIELD(this, kStorage2Offset, Smi::FromInt(value));
-}
-
-
-int TypeFeedbackInfo::own_type_change_checksum() {
- int value = Smi::ToInt(READ_FIELD(this, kStorage1Offset));
- return OwnTypeChangeChecksum::decode(value);
-}
-
-
-bool TypeFeedbackInfo::matches_inlined_type_change_checksum(int checksum) {
- int value = Smi::ToInt(READ_FIELD(this, kStorage2Offset));
- int mask = (1 << kTypeChangeChecksumBits) - 1;
- return InlinedTypeChangeChecksum::decode(value) == (checksum & mask);
-}
-
Relocatable::Relocatable(Isolate* isolate) {
isolate_ = isolate;
prev_ = isolate->relocatable_top();
@@ -5092,15 +3450,6 @@ Object* OrderedHashTableIterator<Derived, TableType>::CurrentKey() {
return key;
}
-
-Object* JSMapIterator::CurrentValue() {
- OrderedHashMap* table(OrderedHashMap::cast(this->table()));
- int index = Smi::ToInt(this->index());
- Object* value = table->ValueAt(index);
- DCHECK(!value->IsTheHole(table->GetIsolate()));
- return value;
-}
-
// Predictably converts HeapObject* or Address to uint32 by calculating the
// offset of the address within its MemoryChunk.
static inline uint32_t ObjectAddressForHashing(void* object) {
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 2ac24f823d..f13c222632 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -16,6 +16,8 @@
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
#include "src/transitions-inl.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
@@ -239,6 +241,14 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
+ case LOAD_HANDLER_TYPE:
+ LoadHandler::cast(this)->LoadHandlerPrint(os);
+ break;
+
+ case STORE_HANDLER_TYPE:
+ StoreHandler::cast(this)->StoreHandlerPrint(os);
+ break;
+
default:
os << "UNKNOWN TYPE " << map()->instance_type();
UNREACHABLE();
@@ -747,24 +757,31 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
return;
}
- os << "\n SharedFunctionInfo: " << Brief(shared_function_info());
- os << "\n Optimized Code: " << Brief(optimized_code_cell());
- os << "\n Invocation Count: " << invocation_count();
- os << "\n Profiler Ticks: " << profiler_ticks();
+ os << "\n - shared function info: " << Brief(shared_function_info());
+ os << "\n - optimized code/marker: ";
+ if (has_optimized_code()) {
+ os << Brief(optimized_code());
+ } else {
+ os << optimization_marker();
+ }
+ os << "\n - invocation count: " << invocation_count();
+ os << "\n - profiler ticks: " << profiler_ticks();
FeedbackMetadataIterator iter(metadata());
while (iter.HasNext()) {
FeedbackSlot slot = iter.Next();
FeedbackSlotKind kind = iter.kind();
- os << "\n Slot " << slot << " " << kind << " ";
+ os << "\n - slot " << slot << " " << kind << " ";
FeedbackSlotPrint(os, slot, kind);
int entry_size = iter.entry_size();
+ if (entry_size > 0) os << " {";
for (int i = 0; i < entry_size; i++) {
int index = GetIndex(slot) + i;
- os << "\n [" << index << "]: " << Brief(get(index));
+ os << "\n [" << index << "]: " << Brief(get(index));
}
+ if (entry_size > 0) os << "\n }";
}
os << "\n";
}
@@ -1430,6 +1447,42 @@ void Tuple3::Tuple3Print(std::ostream& os) { // NOLINT
os << "\n";
}
+void LoadHandler::LoadHandlerPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "LoadHandler");
+ // TODO(ishell): implement printing based on handler kind
+ os << "\n - handler: " << Brief(smi_handler());
+ os << "\n - validity_cell: " << Brief(validity_cell());
+ int data_count = data_field_count();
+ if (data_count >= 1) {
+ os << "\n - data1: " << Brief(data1());
+ }
+ if (data_count >= 2) {
+ os << "\n - data2: " << Brief(data2());
+ }
+ if (data_count >= 3) {
+ os << "\n - data3: " << Brief(data3());
+ }
+ os << "\n";
+}
+
+void StoreHandler::StoreHandlerPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "StoreHandler");
+ // TODO(ishell): implement printing based on handler kind
+ os << "\n - handler: " << Brief(smi_handler());
+ os << "\n - validity_cell: " << Brief(validity_cell());
+ int data_count = data_field_count();
+ if (data_count >= 1) {
+ os << "\n - data1: " << Brief(data1());
+ }
+ if (data_count >= 2) {
+ os << "\n - data2: " << Brief(data2());
+ }
+ if (data_count >= 3) {
+ os << "\n - data3: " << Brief(data3());
+ }
+ os << "\n";
+}
+
void ContextExtension::ContextExtensionPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "ContextExtension");
os << "\n - scope_info: " << Brief(scope_info());
@@ -1552,7 +1605,12 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
os << "\n - wrapper: " << Brief(wrapper());
os << "\n - compilation type: " << compilation_type();
os << "\n - line ends: " << Brief(line_ends());
- os << "\n - eval from shared: " << Brief(eval_from_shared());
+ if (has_eval_from_shared()) {
+ os << "\n - eval from shared: " << Brief(eval_from_shared());
+ }
+ if (is_wrapped()) {
+ os << "\n - wrapped arguments: " << Brief(wrapped_arguments());
+ }
os << "\n - eval from position: " << eval_from_position();
os << "\n - shared function infos: " << Brief(shared_function_infos());
os << "\n";
@@ -1888,8 +1946,36 @@ extern void _v8_internal_Print_Object(void* object) {
}
extern void _v8_internal_Print_Code(void* object) {
+ i::Address address = reinterpret_cast<i::Address>(object);
i::Isolate* isolate = i::Isolate::Current();
- isolate->FindCodeObject(reinterpret_cast<i::Address>(object))->Print();
+
+ i::wasm::WasmCode* wasm_code =
+ isolate->wasm_engine()->code_manager()->LookupCode(address);
+ if (wasm_code) {
+ wasm_code->Print(isolate);
+ return;
+ }
+
+ if (!isolate->heap()->InSpaceSlow(address, i::CODE_SPACE) &&
+ !isolate->heap()->InSpaceSlow(address, i::LO_SPACE)) {
+ i::PrintF(
+ "%p is not within the current isolate's large object or code spaces\n",
+ static_cast<void*>(address));
+ return;
+ }
+
+ i::Code* code = isolate->FindCodeObject(address);
+ if (!code->IsCode()) {
+ i::PrintF("No code object found containing %p\n",
+ static_cast<void*>(address));
+ return;
+ }
+#ifdef ENABLE_DISASSEMBLER
+ i::OFStream os(stdout);
+ code->Disassemble(nullptr, os, address);
+#else // ENABLE_DISASSEMBLER
+ code->Print();
+#endif // ENABLE_DISASSEMBLER
}
extern void _v8_internal_Print_FeedbackMetadata(void* object) {
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index bd876e67d7..f8c55e57a6 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -73,6 +73,7 @@
#include "src/trap-handler/trap-handler.h"
#include "src/unicode-cache-inl.h"
#include "src/utils-inl.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects.h"
#include "src/zone/zone.h"
@@ -1163,7 +1164,7 @@ MaybeHandle<Object> JSProxy::GetProperty(Isolate* isolate,
Object);
}
// 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
// 6. Let trap be ? GetMethod(handler, "get").
Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -1536,7 +1537,7 @@ MaybeHandle<Object> JSProxy::GetPrototype(Handle<JSProxy> proxy) {
NewTypeError(MessageTemplate::kProxyRevoked, trap_name),
Object);
}
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
// 5. Let trap be ? GetMethod(handler, "getPrototypeOf").
@@ -1605,9 +1606,7 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
Object);
}
- v8::AccessorNameGetterCallback call_fun =
- v8::ToCData<v8::AccessorNameGetterCallback>(info->getter());
- if (call_fun == nullptr) return isolate->factory()->undefined_value();
+ if (!info->has_getter()) return isolate->factory()->undefined_value();
if (info->is_sloppy() && !receiver->IsJSReceiver()) {
ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
@@ -1617,14 +1616,15 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
kDontThrow);
- Handle<Object> result = args.Call(call_fun, name);
+ Handle<Object> result = args.CallAccessorGetter(info, name);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (result.is_null()) return isolate->factory()->undefined_value();
Handle<Object> reboxed_result = handle(*result, isolate);
if (info->replace_on_access() && receiver->IsJSReceiver()) {
- args.Call(reinterpret_cast<GenericNamedPropertySetterCallback>(
- &Accessors::ReconfigureToDataProperty),
- name, result);
+ args.CallNamedSetterCallback(
+ reinterpret_cast<GenericNamedPropertySetterCallback>(
+ &Accessors::ReconfigureToDataProperty),
+ name, result);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
}
return reboxed_result;
@@ -1733,7 +1733,7 @@ Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
should_throw);
- Handle<Object> result = args.Call(call_fun, name, value);
+ Handle<Object> result = args.CallNamedSetterCallback(call_fun, name, value);
// In the case of AccessorNameSetterCallback, we know that the result value
// cannot have been set, so the result of Call will be null. In the case of
// AccessorNameBooleanSetterCallback, the result will either be null
@@ -1853,20 +1853,9 @@ MaybeHandle<Object> GetPropertyWithInterceptorInternal(
*holder, kDontThrow);
if (it->IsElement()) {
- uint32_t index = it->index();
- v8::IndexedPropertyGetterCallback getter =
- v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
- result = args.Call(getter, index);
+ result = args.CallIndexedGetter(interceptor, it->index());
} else {
- Handle<Name> name = it->name();
- DCHECK(!name->IsPrivate());
-
- DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
-
- v8::GenericNamedPropertyGetterCallback getter =
- v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
- interceptor->getter());
- result = args.Call(getter, name);
+ result = args.CallNamedGetter(interceptor, it->name());
}
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
@@ -1898,17 +1887,9 @@ Maybe<PropertyAttributes> GetPropertyAttributesWithInterceptorInternal(
if (!interceptor->query()->IsUndefined(isolate)) {
Handle<Object> result;
if (it->IsElement()) {
- uint32_t index = it->index();
- v8::IndexedPropertyQueryCallback query =
- v8::ToCData<v8::IndexedPropertyQueryCallback>(interceptor->query());
- result = args.Call(query, index);
+ result = args.CallIndexedQuery(interceptor, it->index());
} else {
- Handle<Name> name = it->name();
- DCHECK(!name->IsPrivate());
- v8::GenericNamedPropertyQueryCallback query =
- v8::ToCData<v8::GenericNamedPropertyQueryCallback>(
- interceptor->query());
- result = args.Call(query, name);
+ result = args.CallNamedQuery(interceptor, it->name());
}
if (!result.is_null()) {
int32_t value;
@@ -1919,17 +1900,9 @@ Maybe<PropertyAttributes> GetPropertyAttributesWithInterceptorInternal(
// TODO(verwaest): Use GetPropertyWithInterceptor?
Handle<Object> result;
if (it->IsElement()) {
- uint32_t index = it->index();
- v8::IndexedPropertyGetterCallback getter =
- v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
- result = args.Call(getter, index);
+ result = args.CallIndexedGetter(interceptor, it->index());
} else {
- Handle<Name> name = it->name();
- DCHECK(!name->IsPrivate());
- v8::GenericNamedPropertyGetterCallback getter =
- v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
- interceptor->getter());
- result = args.Call(getter, name);
+ result = args.CallNamedGetter(interceptor, it->name());
}
if (!result.is_null()) return Just(DONT_ENUM);
}
@@ -1960,22 +1933,11 @@ Maybe<bool> SetPropertyWithInterceptorInternal(
*holder, should_throw);
if (it->IsElement()) {
- uint32_t index = it->index();
- v8::IndexedPropertySetterCallback setter =
- v8::ToCData<v8::IndexedPropertySetterCallback>(interceptor->setter());
// TODO(neis): In the future, we may want to actually return the
// interceptor's result, which then should be a boolean.
- result = !args.Call(setter, index, value).is_null();
+ result = !args.CallIndexedSetter(interceptor, it->index(), value).is_null();
} else {
- Handle<Name> name = it->name();
- DCHECK(!name->IsPrivate());
-
- DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
-
- v8::GenericNamedPropertySetterCallback setter =
- v8::ToCData<v8::GenericNamedPropertySetterCallback>(
- interceptor->setter());
- result = !args.Call(setter, name, value).is_null();
+ result = !args.CallNamedSetter(interceptor, it->name(), value).is_null();
}
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
@@ -2025,20 +1987,11 @@ Maybe<bool> DefinePropertyWithInterceptorInternal(
}
if (it->IsElement()) {
- uint32_t index = it->index();
- v8::IndexedPropertyDefinerCallback definer =
- v8::ToCData<v8::IndexedPropertyDefinerCallback>(interceptor->definer());
- result = !args.Call(definer, index, *descriptor).is_null();
+ result = !args.CallIndexedDefiner(interceptor, it->index(), *descriptor)
+ .is_null();
} else {
- Handle<Name> name = it->name();
- DCHECK(!name->IsPrivate());
-
- DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
-
- v8::GenericNamedPropertyDefinerCallback definer =
- v8::ToCData<v8::GenericNamedPropertyDefinerCallback>(
- interceptor->definer());
- result = !args.Call(definer, name, *descriptor).is_null();
+ result =
+ !args.CallNamedDefiner(interceptor, it->name(), *descriptor).is_null();
}
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
@@ -2163,11 +2116,13 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
uint32_t hash = name->Hash();
if (object->IsJSGlobalObject()) {
- Handle<JSGlobalObject> global_obj(JSGlobalObject::cast(*object));
+ Handle<JSGlobalObject> global_obj = Handle<JSGlobalObject>::cast(object);
Handle<GlobalDictionary> dictionary(global_obj->global_dictionary());
int entry = dictionary->FindEntry(isolate, name, hash);
if (entry == GlobalDictionary::kNotFound) {
+ DCHECK_IMPLIES(global_obj->map()->is_prototype_map(),
+ Map::IsPrototypeChainInvalidated(global_obj->map()));
auto cell = isolate->factory()->NewPropertyCell(name);
cell->set_value(*value);
auto cell_type = value->IsUndefined(isolate)
@@ -2187,6 +2142,8 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
int entry = dictionary->FindEntry(name);
if (entry == NameDictionary::kNotFound) {
+ DCHECK_IMPLIES(object->map()->is_prototype_map(),
+ Map::IsPrototypeChainInvalidated(object->map()));
dictionary = NameDictionary::Add(dictionary, name, value, details);
object->SetProperties(*dictionary);
} else {
@@ -2613,7 +2570,7 @@ bool Object::IterationHasObservableEffects() {
JSArray* array = JSArray::cast(this);
Isolate* isolate = array->GetIsolate();
-#if defined(DEBUG) || defined(ENABLE_SLOWFAST_SWITCH)
+#ifdef V8_ENABLE_FORCE_SLOW_PATH
if (isolate->force_slow_path()) return true;
#endif
@@ -3280,7 +3237,10 @@ VisitorId Map::GetVisitorId(Map* map) {
if (instance_type == ALLOCATION_SITE_TYPE) {
return kVisitAllocationSite;
}
+ return kVisitStruct;
+ case LOAD_HANDLER_TYPE:
+ case STORE_HANDLER_TYPE:
return kVisitStruct;
default:
@@ -4213,6 +4173,9 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
// JSGlobalProxy must never be normalized
DCHECK(!object->IsJSGlobalProxy());
+ DCHECK_IMPLIES(new_map->is_prototype_map(),
+ Map::IsPrototypeChainInvalidated(*new_map));
+
Isolate* isolate = object->GetIsolate();
HandleScope scope(isolate);
Handle<Map> map(object->map());
@@ -4474,7 +4437,10 @@ void Map::DeprecateTransitionTree() {
transitions.GetTarget(i)->DeprecateTransitionTree();
}
DCHECK(!constructor_or_backpointer()->IsFunctionTemplateInfo());
- deprecate();
+ set_is_deprecated(true);
+ if (FLAG_trace_maps) {
+ LOG(GetIsolate(), MapEvent("Deprecate", this, nullptr));
+ }
dependent_code()->DeoptimizeDependentCodeGroup(
GetIsolate(), DependentCode::kTransitionGroup);
NotifyLeafMapLayoutChange();
@@ -5547,7 +5513,13 @@ Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
void JSProxy::Revoke(Handle<JSProxy> proxy) {
Isolate* isolate = proxy->GetIsolate();
- if (!proxy->IsRevoked()) proxy->set_handler(isolate->heap()->null_value());
+ // ES#sec-proxy-revocation-functions
+ if (!proxy->IsRevoked()) {
+ // 5. Set p.[[ProxyTarget]] to null.
+ proxy->set_target(isolate->heap()->null_value());
+ // 6. Set p.[[ProxyHandler]] to null.
+ proxy->set_handler(isolate->heap()->null_value());
+ }
DCHECK(proxy->IsRevoked());
}
@@ -5563,7 +5535,7 @@ Maybe<bool> JSProxy::IsArray(Handle<JSProxy> proxy) {
isolate->factory()->NewStringFromAsciiChecked("IsArray")));
return Nothing<bool>();
}
- object = handle(proxy->target(), isolate);
+ object = handle(JSReceiver::cast(proxy->target()), isolate);
if (object->IsJSArray()) return Just(true);
if (!object->IsJSProxy()) return Just(false);
}
@@ -5588,7 +5560,7 @@ Maybe<bool> JSProxy::HasProperty(Isolate* isolate, Handle<JSProxy> proxy,
return Nothing<bool>();
}
// 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
// 6. Let trap be ? GetMethod(handler, "has").
Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -5661,7 +5633,7 @@ Maybe<bool> JSProxy::SetProperty(Handle<JSProxy> proxy, Handle<Name> name,
*factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
return Nothing<bool>();
}
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
Handle<Object> trap;
@@ -5712,7 +5684,7 @@ Maybe<bool> JSProxy::DeletePropertyOrElement(Handle<JSProxy> proxy,
*factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
return Nothing<bool>();
}
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
Handle<Object> trap;
@@ -5986,7 +5958,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
void JSObject::MigrateInstance(Handle<JSObject> object) {
Handle<Map> original_map(object->map());
Handle<Map> map = Map::Update(original_map);
- map->set_migration_target(true);
+ map->set_is_migration_target(true);
MigrateToMap(object, map);
if (FLAG_trace_migration) {
object->PrintInstanceMigration(stdout, *original_map, *map);
@@ -6300,7 +6272,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
Handle<Map> new_map = Map::CopyDropDescriptors(old_map);
new_map->set_may_have_interesting_symbols(new_map->has_named_interceptor() ||
new_map->is_access_check_needed());
- new_map->set_dictionary_map(false);
+ new_map->set_is_dictionary_map(false);
NotifyMapChange(old_map, new_map, isolate);
@@ -6626,19 +6598,9 @@ Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it,
*holder, should_throw);
Handle<Object> result;
if (it->IsElement()) {
- uint32_t index = it->index();
- v8::IndexedPropertyDeleterCallback deleter =
- v8::ToCData<v8::IndexedPropertyDeleterCallback>(interceptor->deleter());
- result = args.Call(deleter, index);
+ result = args.CallIndexedDeleter(interceptor, it->index());
} else {
- DCHECK_IMPLIES(it->name()->IsSymbol(),
- interceptor->can_intercept_symbols());
- Handle<Name> name = it->name();
- DCHECK(!name->IsPrivate());
- v8::GenericNamedPropertyDeleterCallback deleter =
- v8::ToCData<v8::GenericNamedPropertyDeleterCallback>(
- interceptor->deleter());
- result = args.Call(deleter, name);
+ result = args.CallNamedDeleter(interceptor, it->name());
}
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
@@ -7526,7 +7488,7 @@ Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
return Nothing<bool>();
}
// 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
// 6. Let trap be ? GetMethod(handler, "defineProperty").
Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -7690,19 +7652,9 @@ Maybe<bool> GetPropertyDescriptorWithInterceptor(LookupIterator* it,
PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
*holder, kDontThrow);
if (it->IsElement()) {
- uint32_t index = it->index();
- v8::IndexedPropertyDescriptorCallback descriptorCallback =
- v8::ToCData<v8::IndexedPropertyDescriptorCallback>(
- interceptor->descriptor());
-
- result = args.Call(descriptorCallback, index);
+ result = args.CallIndexedDescriptor(interceptor, it->index());
} else {
- Handle<Name> name = it->name();
- DCHECK(!name->IsPrivate());
- v8::GenericNamedPropertyDescriptorCallback descriptorCallback =
- v8::ToCData<v8::GenericNamedPropertyDescriptorCallback>(
- interceptor->descriptor());
- result = args.Call(descriptorCallback, name);
+ result = args.CallNamedDescriptor(interceptor, it->name());
}
if (!result.is_null()) {
// Request successfully intercepted, try to set the property
@@ -7809,7 +7761,7 @@ Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
return Nothing<bool>();
}
// 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
// 6. Let trap be ? GetMethod(handler, "getOwnPropertyDescriptor").
Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -8261,7 +8213,7 @@ Maybe<bool> JSProxy::PreventExtensions(Handle<JSProxy> proxy,
*factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
return Nothing<bool>();
}
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
Handle<Object> trap;
@@ -8369,7 +8321,7 @@ Maybe<bool> JSProxy::IsExtensible(Handle<JSProxy> proxy) {
*factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
return Nothing<bool>();
}
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
Handle<Object> trap;
@@ -8839,9 +8791,10 @@ MUST_USE_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
Handle<JSReceiver> object,
PropertyFilter filter,
+ bool try_fast_path,
bool get_entries) {
Handle<FixedArray> values_or_entries;
- if (filter == ENUMERABLE_STRINGS) {
+ if (try_fast_path && filter == ENUMERABLE_STRINGS) {
Maybe<bool> fast_values_or_entries = FastGetOwnValuesOrEntries(
isolate, object, get_entries, &values_or_entries);
if (fast_values_or_entries.IsNothing()) return MaybeHandle<FixedArray>();
@@ -8894,13 +8847,17 @@ MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
}
MaybeHandle<FixedArray> JSReceiver::GetOwnValues(Handle<JSReceiver> object,
- PropertyFilter filter) {
- return GetOwnValuesOrEntries(object->GetIsolate(), object, filter, false);
+ PropertyFilter filter,
+ bool try_fast_path) {
+ return GetOwnValuesOrEntries(object->GetIsolate(), object, filter,
+ try_fast_path, false);
}
MaybeHandle<FixedArray> JSReceiver::GetOwnEntries(Handle<JSReceiver> object,
- PropertyFilter filter) {
- return GetOwnValuesOrEntries(object->GetIsolate(), object, filter, true);
+ PropertyFilter filter,
+ bool try_fast_path) {
+ return GetOwnValuesOrEntries(object->GetIsolate(), object, filter,
+ try_fast_path, true);
}
bool Map::DictionaryElementsInPrototypeChainOnly() {
@@ -9077,13 +9034,13 @@ Handle<Map> Map::RawCopy(Handle<Map> map, int instance_size,
result->set_bit_field(map->bit_field());
result->set_bit_field2(map->bit_field2());
int new_bit_field3 = map->bit_field3();
- new_bit_field3 = OwnsDescriptors::update(new_bit_field3, true);
+ new_bit_field3 = OwnsDescriptorsBit::update(new_bit_field3, true);
new_bit_field3 = NumberOfOwnDescriptorsBits::update(new_bit_field3, 0);
new_bit_field3 = EnumLengthBits::update(new_bit_field3,
kInvalidEnumCacheSentinel);
- new_bit_field3 = Deprecated::update(new_bit_field3, false);
+ new_bit_field3 = IsDeprecatedBit::update(new_bit_field3, false);
if (!map->is_dictionary_map()) {
- new_bit_field3 = IsUnstable::update(new_bit_field3, false);
+ new_bit_field3 = IsUnstableBit::update(new_bit_field3, false);
}
result->set_bit_field3(new_bit_field3);
return result;
@@ -9164,8 +9121,8 @@ Handle<Map> Map::CopyNormalized(Handle<Map> map,
// Clear the unused_property_fields explicitly as this field should not
// be accessed for normalized maps.
result->SetInObjectUnusedPropertyFields(0);
- result->set_dictionary_map(true);
- result->set_migration_target(false);
+ result->set_is_dictionary_map(true);
+ result->set_is_migration_target(false);
result->set_may_have_interesting_symbols(true);
result->set_construction_counter(kNoSlackTracking);
@@ -9184,7 +9141,7 @@ Handle<Map> Map::CopyNormalized(Handle<Map> map,
// static
Handle<Map> Map::TransitionToImmutableProto(Handle<Map> map) {
Handle<Map> new_map = Map::Copy(map, "ImmutablePrototype");
- new_map->set_immutable_proto(true);
+ new_map->set_is_immutable_proto(true);
return new_map;
}
@@ -9723,8 +9680,8 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
bool* created_new_map) {
RuntimeCallTimerScope stats_scope(
*map, map->is_prototype_map()
- ? &RuntimeCallStats::PrototypeMap_TransitionToDataProperty
- : &RuntimeCallStats::Map_TransitionToDataProperty);
+ ? RuntimeCallCounterId::kPrototypeMap_TransitionToDataProperty
+ : RuntimeCallCounterId::kMap_TransitionToDataProperty);
DCHECK(name->IsUniqueName());
DCHECK(!map->is_dictionary_map());
@@ -9839,8 +9796,8 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
RuntimeCallTimerScope stats_scope(
isolate,
map->is_prototype_map()
- ? &RuntimeCallStats::PrototypeMap_TransitionToAccessorProperty
- : &RuntimeCallStats::Map_TransitionToAccessorProperty);
+ ? RuntimeCallCounterId::kPrototypeMap_TransitionToAccessorProperty
+ : RuntimeCallCounterId::kMap_TransitionToAccessorProperty);
// At least one of the accessors needs to be a new value.
DCHECK(!getter->IsNull(isolate) || !setter->IsNull(isolate));
@@ -10291,8 +10248,10 @@ Handle<ArrayList> ArrayList::Add(Handle<ArrayList> array, Handle<Object> obj1,
// static
Handle<ArrayList> ArrayList::New(Isolate* isolate, int size) {
- Handle<ArrayList> result = Handle<ArrayList>::cast(
- isolate->factory()->NewFixedArray(size + kFirstIndex));
+ Handle<FixedArray> fixed_array =
+ isolate->factory()->NewFixedArray(size + kFirstIndex);
+ fixed_array->set_map_no_write_barrier(isolate->heap()->array_list_map());
+ Handle<ArrayList> result = Handle<ArrayList>::cast(fixed_array);
result->SetLength(0);
return result;
}
@@ -10331,10 +10290,13 @@ Handle<FixedArray> EnsureSpaceInFixedArray(Handle<FixedArray> array,
// static
Handle<ArrayList> ArrayList::EnsureSpace(Handle<ArrayList> array, int length) {
const bool empty = (array->length() == 0);
- auto ret = Handle<ArrayList>::cast(
- EnsureSpaceInFixedArray(array, kFirstIndex + length));
- if (empty) ret->SetLength(0);
- return ret;
+ auto ret = EnsureSpaceInFixedArray(array, kFirstIndex + length);
+ if (empty) {
+ ret->set_map_no_write_barrier(array->GetHeap()->array_list_map());
+
+ Handle<ArrayList>::cast(ret)->SetLength(0);
+ }
+ return Handle<ArrayList>::cast(ret);
}
Handle<RegExpMatchInfo> RegExpMatchInfo::ReserveCaptures(
@@ -10710,7 +10672,7 @@ Handle<Object> String::ToNumber(Handle<String> subject) {
// whitespace, a sign ('+' or '-'), the decimal point, a decimal digit
// or the 'I' character ('Infinity'). All of that have codes not greater
// than '9' except 'I' and &nbsp;.
- if (data[start_pos] != 'I' && data[start_pos] != 0xa0) {
+ if (data[start_pos] != 'I' && data[start_pos] != 0xA0) {
return isolate->factory()->nan_value();
}
} else if (len - start_pos < 10 && AreDigits(data, start_pos, len)) {
@@ -12551,15 +12513,19 @@ bool JSObject::UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
return true;
}
+namespace {
-static void InvalidatePrototypeChainsInternal(Map* map) {
+// This function must be kept in sync with
+// AccessorAssembler::InvalidateValidityCellIfPrototype() which does pre-checks
+// before jumping here.
+PrototypeInfo* InvalidateOnePrototypeValidityCellInternal(Map* map) {
DCHECK(map->is_prototype_map());
if (FLAG_trace_prototype_users) {
PrintF("Invalidating prototype map %p 's cell\n",
reinterpret_cast<void*>(map));
}
Object* maybe_proto_info = map->prototype_info();
- if (!maybe_proto_info->IsPrototypeInfo()) return;
+ if (!maybe_proto_info->IsPrototypeInfo()) return nullptr;
PrototypeInfo* proto_info = PrototypeInfo::cast(maybe_proto_info);
Object* maybe_cell = proto_info->validity_cell();
if (maybe_cell->IsCell()) {
@@ -12567,6 +12533,12 @@ static void InvalidatePrototypeChainsInternal(Map* map) {
Cell* cell = Cell::cast(maybe_cell);
cell->set_value(Smi::FromInt(Map::kPrototypeChainInvalid));
}
+ return proto_info;
+}
+
+void InvalidatePrototypeChainsInternal(Map* map) {
+ PrototypeInfo* proto_info = InvalidateOnePrototypeValidityCellInternal(map);
+ if (proto_info == nullptr) return;
WeakFixedArray::Iterator iterator(proto_info->prototype_users());
// For now, only maps register themselves as users.
@@ -12577,13 +12549,27 @@ static void InvalidatePrototypeChainsInternal(Map* map) {
}
}
+} // namespace
// static
-void JSObject::InvalidatePrototypeChains(Map* map) {
+Map* JSObject::InvalidatePrototypeChains(Map* map) {
DisallowHeapAllocation no_gc;
InvalidatePrototypeChainsInternal(map);
+ return map;
}
+// We also invalidate the global object's validity cell when a new lexical
+// environment variable is added. This is necessary to ensure that
+// Load/StoreGlobalIC handlers that load/store from the global object's
+// prototype get properly invalidated.
+// Note that the normal Load/StoreICs that load/store through the global object
+// in the prototype chain are not affected by the appearance of a new lexical
+// variable, and therefore we don't propagate the invalidation down.
+// static
+void JSObject::InvalidatePrototypeValidityCell(JSGlobalObject* global) {
+ DisallowHeapAllocation no_gc;
+ InvalidateOnePrototypeValidityCellInternal(global->map());
+}
// static
Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<JSObject> prototype,
@@ -12662,6 +12648,21 @@ Handle<Cell> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
}
// static
+bool Map::IsPrototypeChainInvalidated(Map* map) {
+ DCHECK(map->is_prototype_map());
+ Object* maybe_proto_info = map->prototype_info();
+ if (maybe_proto_info->IsPrototypeInfo()) {
+ PrototypeInfo* proto_info = PrototypeInfo::cast(maybe_proto_info);
+ Object* maybe_cell = proto_info->validity_cell();
+ if (maybe_cell->IsCell()) {
+ Cell* cell = Cell::cast(maybe_cell);
+ return cell->value() == Smi::FromInt(Map::kPrototypeChainInvalid);
+ }
+ }
+ return true;
+}
+
+// static
Handle<WeakCell> Map::GetOrCreatePrototypeWeakCell(Handle<JSReceiver> prototype,
Isolate* isolate) {
DCHECK(!prototype.is_null());
@@ -12688,7 +12689,8 @@ Handle<WeakCell> Map::GetOrCreatePrototypeWeakCell(Handle<JSReceiver> prototype,
// static
void Map::SetPrototype(Handle<Map> map, Handle<Object> prototype,
bool enable_prototype_setup_mode) {
- RuntimeCallTimerScope stats_scope(*map, &RuntimeCallStats::Map_SetPrototype);
+ RuntimeCallTimerScope stats_scope(*map,
+ RuntimeCallCounterId::kMap_SetPrototype);
bool is_hidden = false;
if (prototype->IsJSObject()) {
@@ -12815,7 +12817,7 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
JSObject::MigrateToMap(function, new_map);
new_map->SetConstructor(*value);
- new_map->set_non_instance_prototype(true);
+ new_map->set_has_non_instance_prototype(true);
FunctionKind kind = function->shared()->kind();
Handle<Context> native_context(function->context()->native_context());
@@ -12829,7 +12831,7 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
isolate);
} else {
construct_prototype = Handle<JSReceiver>::cast(value);
- function->map()->set_non_instance_prototype(false);
+ function->map()->set_has_non_instance_prototype(false);
}
SetInstancePrototype(isolate, function, construct_prototype);
@@ -12976,6 +12978,56 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
map->StartInobjectSlackTracking();
}
+namespace {
+bool FastInitializeDerivedMap(Isolate* isolate, Handle<JSFunction> new_target,
+ Handle<JSFunction> constructor,
+ Handle<Map> constructor_initial_map) {
+ // Check that |function|'s initial map is still in sync with the
+ // |constructor|; otherwise we must create a new initial map for |function|.
+ if (new_target->has_initial_map() &&
+ new_target->initial_map()->GetConstructor() == *constructor) {
+ DCHECK(new_target->instance_prototype()->IsJSReceiver());
+ return true;
+ }
+ InstanceType instance_type = constructor_initial_map->instance_type();
+ DCHECK(CanSubclassHaveInobjectProperties(instance_type));
+ // Create a new map with the size and number of in-object properties
+ // suggested by |function|.
+
+ // Link initial map and constructor function if the new.target is actually a
+ // subclass constructor.
+ if (!IsDerivedConstructor(new_target->shared()->kind())) return false;
+
+ int instance_size;
+ int in_object_properties;
+ int embedder_fields =
+ JSObject::GetEmbedderFieldCount(*constructor_initial_map);
+ bool success = JSFunction::CalculateInstanceSizeForDerivedClass(
+ new_target, instance_type, embedder_fields, &instance_size,
+ &in_object_properties);
+
+ Handle<Map> map;
+ if (success) {
+ int pre_allocated = constructor_initial_map->GetInObjectProperties() -
+ constructor_initial_map->UnusedPropertyFields();
+ CHECK_LE(constructor_initial_map->UsedInstanceSize(), instance_size);
+ int unused_property_fields = in_object_properties - pre_allocated;
+ map = Map::CopyInitialMap(constructor_initial_map, instance_size,
+ in_object_properties, unused_property_fields);
+ } else {
+ map = Map::CopyInitialMap(constructor_initial_map);
+ }
+ map->set_new_target_is_base(false);
+ Handle<Object> prototype(new_target->instance_prototype(), isolate);
+ JSFunction::SetInitialMap(new_target, map, prototype);
+ DCHECK(new_target->instance_prototype()->IsJSReceiver());
+ map->SetConstructor(*constructor);
+ map->set_construction_counter(Map::kNoSlackTracking);
+ map->StartInobjectSlackTracking();
+ return true;
+}
+
+} // namespace
// static
MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
@@ -12986,55 +13038,16 @@ MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
Handle<Map> constructor_initial_map(constructor->initial_map(), isolate);
if (*new_target == *constructor) return constructor_initial_map;
+ Handle<Map> result_map;
// Fast case, new.target is a subclass of constructor. The map is cacheable
// (and may already have been cached). new.target.prototype is guaranteed to
// be a JSReceiver.
if (new_target->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(new_target);
-
- // Check that |function|'s initial map still in sync with the |constructor|,
- // otherwise we must create a new initial map for |function|.
- if (function->has_initial_map() &&
- function->initial_map()->GetConstructor() == *constructor) {
+ if (FastInitializeDerivedMap(isolate, function, constructor,
+ constructor_initial_map)) {
return handle(function->initial_map(), isolate);
}
-
- // Create a new map with the size and number of in-object properties
- // suggested by |function|.
-
- // Link initial map and constructor function if the new.target is actually a
- // subclass constructor.
- if (IsDerivedConstructor(function->shared()->kind())) {
- Handle<Object> prototype(function->instance_prototype(), isolate);
- InstanceType instance_type = constructor_initial_map->instance_type();
- DCHECK(CanSubclassHaveInobjectProperties(instance_type));
- int embedder_fields =
- JSObject::GetEmbedderFieldCount(*constructor_initial_map);
- int pre_allocated = constructor_initial_map->GetInObjectProperties() -
- constructor_initial_map->UnusedPropertyFields();
- int instance_size;
- int in_object_properties;
- bool success = CalculateInstanceSizeForDerivedClass(
- function, instance_type, embedder_fields, &instance_size,
- &in_object_properties);
-
- int unused_property_fields = in_object_properties - pre_allocated;
-
- Handle<Map> map;
- if (success) {
- map = Map::CopyInitialMap(constructor_initial_map, instance_size,
- in_object_properties, unused_property_fields);
- } else {
- map = Map::CopyInitialMap(constructor_initial_map);
- }
- map->set_new_target_is_base(false);
-
- JSFunction::SetInitialMap(function, map, prototype);
- map->SetConstructor(*constructor);
- map->set_construction_counter(Map::kNoSlackTracking);
- map->StartInobjectSlackTracking();
- return map;
- }
}
// Slow path, new.target is either a proxy or can't cache the map.
@@ -13076,7 +13089,7 @@ MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
Handle<Map> map = Map::CopyInitialMap(constructor_initial_map);
map->set_new_target_is_base(false);
- DCHECK(prototype->IsJSReceiver());
+ CHECK(prototype->IsJSReceiver());
if (map->prototype() != *prototype) Map::SetPrototype(map, prototype);
map->SetConstructor(*constructor);
return map;
@@ -13183,7 +13196,8 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
}
if (FLAG_harmony_function_tostring) {
- return Handle<String>::cast(shared_info->GetSourceCodeHarmony());
+ return Handle<String>::cast(
+ SharedFunctionInfo::GetSourceCodeHarmony(shared_info));
}
IncrementalStringBuilder builder(isolate);
@@ -13214,7 +13228,22 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
builder.AppendString(handle(shared_info->name(), isolate));
}
}
- builder.AppendString(Handle<String>::cast(shared_info->GetSourceCode()));
+ if (shared_info->is_wrapped()) {
+ builder.AppendCharacter('(');
+ Handle<FixedArray> args(
+ Script::cast(shared_info->script())->wrapped_arguments());
+ int argc = args->length();
+ for (int i = 0; i < argc; i++) {
+ if (i > 0) builder.AppendCString(", ");
+ builder.AppendString(Handle<String>(String::cast(args->get(i))));
+ }
+ builder.AppendCString(") {\n");
+ }
+ builder.AppendString(
+ Handle<String>::cast(SharedFunctionInfo::GetSourceCode(shared_info)));
+ if (shared_info->is_wrapped()) {
+ builder.AppendCString("\n}");
+ }
return builder.Finish().ToHandleChecked();
}
@@ -13245,10 +13274,10 @@ int Script::GetEvalPosition() {
// Due to laziness, the position may not have been translated from code
// offset yet, which would be encoded as negative integer. In that case,
// translate and set the position.
- if (eval_from_shared()->IsUndefined(GetIsolate())) {
+ if (!has_eval_from_shared()) {
position = 0;
} else {
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(eval_from_shared());
+ SharedFunctionInfo* shared = eval_from_shared();
position = shared->abstract_code()->SourcePosition(-position);
}
DCHECK_GE(position, 0);
@@ -13286,6 +13315,15 @@ bool Script::GetPositionInfo(Handle<Script> script, int position,
bool Script::IsUserJavaScript() { return type() == Script::TYPE_NORMAL; }
+bool Script::ContainsAsmModule() {
+ DisallowHeapAllocation no_gc;
+ SharedFunctionInfo::ScriptIterator iter(Handle<Script>(this));
+ while (SharedFunctionInfo* info = iter.Next()) {
+ if (info->HasAsmWasmData()) return true;
+ }
+ return false;
+}
+
namespace {
bool GetPositionInfoSlow(const Script* script, int position,
Script::PositionInfo* info) {
@@ -13324,8 +13362,8 @@ bool Script::GetPositionInfo(int position, PositionInfo* info,
Handle<WasmCompiledModule> compiled_module(
WasmCompiledModule::cast(wasm_compiled_module()));
DCHECK_LE(0, position);
- return compiled_module->GetPositionInfo(static_cast<uint32_t>(position),
- info);
+ return compiled_module->shared()->GetPositionInfo(
+ static_cast<uint32_t>(position), info);
}
if (line_ends()->IsUndefined(GetIsolate())) {
@@ -13456,8 +13494,12 @@ Handle<JSObject> Script::GetWrapper(Handle<Script> script) {
MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
Isolate* isolate, const FunctionLiteral* fun) {
- DCHECK_NE(fun->function_literal_id(), FunctionLiteral::kIdTypeInvalid);
- DCHECK_LT(fun->function_literal_id(), shared_function_infos()->length());
+ CHECK_NE(fun->function_literal_id(), FunctionLiteral::kIdTypeInvalid);
+ // If this check fails, the problem is most probably the function id
+ // renumbering done by AstFunctionLiteralIdReindexer; in particular, that
+ // AstTraversalVisitor doesn't recurse properly in the construct which
+ // triggers the mismatch.
+ CHECK_LT(fun->function_literal_id(), shared_function_infos()->length());
Object* shared = shared_function_infos()->get(fun->function_literal_id());
if (shared->IsUndefined(isolate) || WeakCell::cast(shared)->cleared()) {
return MaybeHandle<SharedFunctionInfo>();
@@ -13628,14 +13670,14 @@ String* SharedFunctionInfo::DebugName() {
return name();
}
-bool SharedFunctionInfo::HasNoSideEffect() {
- if (!computed_has_no_side_effect()) {
- DisallowHeapAllocation not_handlified;
- Handle<SharedFunctionInfo> info(this);
- set_has_no_side_effect(DebugEvaluate::FunctionHasNoSideEffect(info));
- set_computed_has_no_side_effect(true);
+// static
+bool SharedFunctionInfo::HasNoSideEffect(Handle<SharedFunctionInfo> info) {
+ if (!info->computed_has_no_side_effect()) {
+ bool has_no_side_effect = DebugEvaluate::FunctionHasNoSideEffect(info);
+ info->set_has_no_side_effect(has_no_side_effect);
+ info->set_computed_has_no_side_effect(true);
}
- return has_no_side_effect();
+ return info->has_no_side_effect();
}
// The filter is a pattern that matches function names in this way:
@@ -13680,22 +13722,44 @@ bool SharedFunctionInfo::HasSourceCode() const {
!reinterpret_cast<Script*>(script())->source()->IsUndefined(isolate);
}
-
-Handle<Object> SharedFunctionInfo::GetSourceCode() {
- if (!HasSourceCode()) return GetIsolate()->factory()->undefined_value();
- Handle<String> source(String::cast(Script::cast(script())->source()));
- return GetIsolate()->factory()->NewSubString(
- source, start_position(), end_position());
+// static
+Handle<Object> SharedFunctionInfo::GetSourceCode(
+ Handle<SharedFunctionInfo> shared) {
+ Isolate* isolate = shared->GetIsolate();
+ if (!shared->HasSourceCode()) return isolate->factory()->undefined_value();
+ Handle<String> source(String::cast(Script::cast(shared->script())->source()));
+ return isolate->factory()->NewSubString(source, shared->start_position(),
+ shared->end_position());
}
-Handle<Object> SharedFunctionInfo::GetSourceCodeHarmony() {
- Isolate* isolate = GetIsolate();
- if (!HasSourceCode()) return isolate->factory()->undefined_value();
- Handle<String> script_source(String::cast(Script::cast(script())->source()));
- int start_pos = function_token_position();
- if (start_pos == kNoSourcePosition) start_pos = start_position();
- return isolate->factory()->NewSubString(script_source, start_pos,
- end_position());
+// static
+Handle<Object> SharedFunctionInfo::GetSourceCodeHarmony(
+ Handle<SharedFunctionInfo> shared) {
+ Isolate* isolate = shared->GetIsolate();
+ if (!shared->HasSourceCode()) return isolate->factory()->undefined_value();
+ Handle<String> script_source(
+ String::cast(Script::cast(shared->script())->source()));
+ int start_pos = shared->function_token_position();
+ if (start_pos == kNoSourcePosition) start_pos = shared->start_position();
+ Handle<String> source = isolate->factory()->NewSubString(
+ script_source, start_pos, shared->end_position());
+ if (!shared->is_wrapped()) return source;
+
+ DCHECK(!shared->name_should_print_as_anonymous());
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("function ");
+ builder.AppendString(Handle<String>(shared->name(), isolate));
+ builder.AppendCString("(");
+ Handle<FixedArray> args(Script::cast(shared->script())->wrapped_arguments());
+ int argc = args->length();
+ for (int i = 0; i < argc; i++) {
+ if (i > 0) builder.AppendCString(", ");
+ builder.AppendString(Handle<String>(String::cast(args->get(i))));
+ }
+ builder.AppendCString(") {\n");
+ builder.AppendString(source);
+ builder.AppendCString("\n}");
+ return builder.Finish().ToHandleChecked();
}
bool SharedFunctionInfo::IsInlineable() {
@@ -13720,15 +13784,17 @@ void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
int* instance_size,
int* in_object_properties) {
int header_size = JSObject::GetHeaderSize(instance_type, has_prototype_slot);
- DCHECK_LE(requested_embedder_fields,
- (JSObject::kMaxInstanceSize - header_size) >> kPointerSizeLog2);
+ int max_nof_fields =
+ (JSObject::kMaxInstanceSize - header_size) >> kPointerSizeLog2;
+ CHECK_LE(max_nof_fields, JSObject::kMaxInObjectProperties);
+ *in_object_properties = Min(requested_in_object_properties, max_nof_fields);
+ CHECK_LE(requested_embedder_fields, max_nof_fields - *in_object_properties);
*instance_size =
- Min(header_size +
- ((requested_embedder_fields + requested_in_object_properties)
- << kPointerSizeLog2),
- JSObject::kMaxInstanceSize);
- *in_object_properties = ((*instance_size - header_size) >> kPointerSizeLog2) -
- requested_embedder_fields;
+ header_size +
+ ((requested_embedder_fields + *in_object_properties) << kPointerSizeLog2);
+ CHECK_EQ(*in_object_properties,
+ ((*instance_size - header_size) >> kPointerSizeLog2) -
+ requested_embedder_fields);
}
// static
@@ -13738,7 +13804,6 @@ bool JSFunction::CalculateInstanceSizeForDerivedClass(
int* in_object_properties) {
Isolate* isolate = function->GetIsolate();
int expected_nof_properties = 0;
- bool result = true;
for (PrototypeIterator iter(isolate, function, kStartAtReceiver);
!iter.IsAtEnd(); iter.Advance()) {
Handle<JSReceiver> current =
@@ -13751,21 +13816,24 @@ bool JSFunction::CalculateInstanceSizeForDerivedClass(
if (shared->is_compiled() ||
Compiler::Compile(func, Compiler::CLEAR_EXCEPTION)) {
DCHECK(shared->is_compiled());
- expected_nof_properties += shared->expected_nof_properties();
+ int count = shared->expected_nof_properties();
+ // Check that the estimate is sane.
+ if (expected_nof_properties <= JSObject::kMaxInObjectProperties - count) {
+ expected_nof_properties += count;
+ } else {
+ expected_nof_properties = JSObject::kMaxInObjectProperties;
+ }
} else if (!shared->is_compiled()) {
// In case there was a compilation error for the constructor we will
// throw an error during instantiation. Hence we directly return 0;
- result = false;
- break;
- }
- if (!IsDerivedConstructor(shared->kind())) {
- break;
+ return false;
}
+ if (!IsDerivedConstructor(shared->kind())) break;
}
CalculateInstanceSizeHelper(instance_type, true, requested_embedder_fields,
expected_nof_properties, instance_size,
in_object_properties);
- return result;
+ return true;
}
@@ -13804,7 +13872,7 @@ std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v) {
void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
- DCHECK_NE(reason, kNoReason);
+ DCHECK_NE(reason, BailoutReason::kNoReason);
set_compiler_hints(
DisabledOptimizationReasonBits::update(compiler_hints(), reason));
@@ -13833,6 +13901,7 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
shared_info->set_inferred_name(*lit->inferred_name());
shared_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
shared_info->set_language_mode(lit->language_mode());
+ shared_info->set_is_wrapped(lit->is_wrapped());
// shared_info->set_kind(lit->kind());
// FunctionKind must have already been set.
DCHECK(lit->kind() == shared_info->kind());
@@ -13941,7 +14010,7 @@ void Code::InvalidateEmbeddedObjects() {
void Code::Relocate(intptr_t delta) {
- if (trap_handler::UseTrapHandler() && is_wasm_code()) {
+ if (trap_handler::IsTrapHandlerEnabled() && is_wasm_code()) {
const int index = trap_handler_index()->value();
if (index >= 0) {
trap_handler::UpdateHandlerDataCodePointer(index, instruction_start());
@@ -14101,11 +14170,11 @@ void JSFunction::ClearTypeFeedbackInfo() {
}
}
-void Code::PrintDeoptLocation(FILE* out, Address pc) {
+void Code::PrintDeoptLocation(FILE* out, const char* str, Address pc) {
Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(this, pc);
class SourcePosition pos = info.position;
- if (info.deopt_reason != DeoptimizeReason::kNoReason || pos.IsKnown()) {
- PrintF(out, " ;;; deoptimize at ");
+ if (info.deopt_reason != DeoptimizeReason::kUnknown || pos.IsKnown()) {
+ PrintF(out, "%s", str);
OFStream outstr(out);
pos.Print(outstr, this);
PrintF(out, ", %s\n", DeoptimizeReasonToString(info.deopt_reason));
@@ -14291,9 +14360,11 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
DCHECK(Translation::BEGIN == opcode);
int frame_count = iterator.Next();
int jsframe_count = iterator.Next();
+ int update_feedback_count = iterator.Next();
os << " " << Translation::StringFor(opcode)
<< " {frame count=" << frame_count
- << ", js frame count=" << jsframe_count << "}\n";
+ << ", js frame count=" << jsframe_count
+ << ", update_feedback_count=" << update_feedback_count << "}\n";
while (iterator.HasNext() &&
Translation::BEGIN !=
@@ -14450,6 +14521,14 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
os << "{length=" << args_length << "}";
break;
}
+
+ case Translation::UPDATE_FEEDBACK: {
+ int literal_index = iterator.Next();
+ FeedbackSlot slot(iterator.Next());
+ os << "{feedback={vector_index=" << literal_index << ", slot=" << slot
+ << "}}";
+ break;
+ }
}
os << "\n";
}
@@ -14485,8 +14564,7 @@ void HandlerTable::HandlerTableReturnPrint(std::ostream& os) {
}
}
-
-void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
+void Code::Disassemble(const char* name, std::ostream& os, void* current_pc) {
os << "kind = " << Kind2String(kind()) << "\n";
if (is_stub()) {
const char* n = CodeStub::MajorName(CodeStub::GetMajorKey(this));
@@ -14514,21 +14592,22 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
os << "compiler = " << (is_turbofanned() ? "turbofan" : "unknown") << "\n";
os << "address = " << static_cast<const void*>(this) << "\n";
- os << "Instructions (size = " << instruction_size() << ")\n";
+ os << "Body (size = " << instruction_size() << ")\n";
{
Isolate* isolate = GetIsolate();
int size = instruction_size();
int safepoint_offset =
- is_turbofanned() ? static_cast<int>(safepoint_table_offset()) : size;
+ has_safepoint_info() ? safepoint_table_offset() : size;
int constant_pool_offset = FLAG_enable_embedded_constant_pool
? this->constant_pool_offset()
: size;
// Stop before reaching any embedded tables
int code_size = Min(safepoint_offset, constant_pool_offset);
+ os << "Instructions (size = " << code_size << ")\n";
byte* begin = instruction_start();
byte* end = begin + code_size;
- Disassembler::Decode(isolate, &os, begin, end, this);
+ Disassembler::Decode(isolate, &os, begin, end, this, current_pc);
if (constant_pool_offset < size) {
int constant_pool_size = safepoint_offset - constant_pool_offset;
@@ -14562,7 +14641,7 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
}
os << "\n";
- if (is_turbofanned()) {
+ if (has_safepoint_info()) {
SafepointTable table(this);
os << "Safepoints (size = " << table.size() << ")\n";
for (unsigned i = 0; i < table.length(); i++) {
@@ -15057,7 +15136,7 @@ Maybe<bool> JSProxy::SetPrototype(Handle<JSProxy> proxy, Handle<Object> value,
return Nothing<bool>();
}
// 5. Let target be the value of the [[ProxyTarget]] internal slot.
- Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
// 6. Let trap be ? GetMethod(handler, "getPrototypeOf").
Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -15267,13 +15346,10 @@ static bool ShouldConvertToSlowElements(JSObject* object, uint32_t capacity,
bool JSObject::WouldConvertToSlowElements(uint32_t index) {
- if (HasFastElements()) {
- Handle<FixedArrayBase> backing_store(FixedArrayBase::cast(elements()));
- uint32_t capacity = static_cast<uint32_t>(backing_store->length());
- uint32_t new_capacity;
- return ShouldConvertToSlowElements(this, capacity, index, &new_capacity);
- }
- return false;
+ if (!HasFastElements()) return false;
+ uint32_t capacity = static_cast<uint32_t>(elements()->length());
+ uint32_t new_capacity;
+ return ShouldConvertToSlowElements(this, capacity, index, &new_capacity);
}
@@ -16655,26 +16731,18 @@ MaybeHandle<JSTypedArray> JSTypedArray::SpeciesCreate(
// 2. Let defaultConstructor be the intrinsic object listed in column one of
// Table 51 for exemplar.[[TypedArrayName]].
- Handle<JSFunction> default_ctor = isolate->uint8_array_fun();
- switch (exemplar->type()) {
-#define TYPED_ARRAY_CTOR(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: { \
- default_ctor = isolate->type##_array_fun(); \
- break; \
- }
-
- TYPED_ARRAYS(TYPED_ARRAY_CTOR)
-#undef TYPED_ARRAY_CTOR
- default:
- UNREACHABLE();
- }
+ Handle<JSFunction> default_ctor =
+ JSTypedArray::DefaultConstructor(isolate, exemplar);
// 3. Let constructor be ? SpeciesConstructor(exemplar, defaultConstructor).
- Handle<Object> ctor;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, ctor,
- Object::SpeciesConstructor(isolate, exemplar, default_ctor),
- JSTypedArray);
+ Handle<Object> ctor = default_ctor;
+ if (!exemplar->HasJSTypedArrayPrototype(isolate) ||
+ !isolate->IsArraySpeciesLookupChainIntact()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, ctor,
+ Object::SpeciesConstructor(isolate, exemplar, default_ctor),
+ JSTypedArray);
+ }
// 4. Return ? TypedArrayCreate(constructor, argumentList).
return Create(isolate, ctor, argc, argv, method_name);
@@ -16682,6 +16750,10 @@ MaybeHandle<JSTypedArray> JSTypedArray::SpeciesCreate(
void JSGlobalObject::InvalidatePropertyCell(Handle<JSGlobalObject> global,
Handle<Name> name) {
+ // Regardless of whether the property is there or not, invalidate
+ // Load/StoreGlobalICs that load/store through the global object's prototype.
+ JSObject::InvalidatePrototypeValidityCell(*global);
+
DCHECK(!global->HasFastProperties());
auto dictionary = handle(global->global_dictionary());
int entry = dictionary->FindEntry(name);
@@ -17703,6 +17775,8 @@ Handle<FixedArray> BaseNameDictionary<Derived, Shape>::IterationIndices(
array->set(array_size++, Smi::FromInt(i));
}
+ DCHECK_EQ(array_size, length);
+
EnumIndexComparator<Derived> cmp(raw_dictionary);
// Use AtomicElement wrapper to ensure that std::sort uses atomic load and
// store operations that are safe for concurrent marking.
@@ -18924,6 +18998,13 @@ void JSArrayBuffer::FreeBackingStore() {
// static
void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
+ if (allocation.mode == ArrayBuffer::Allocator::AllocationMode::kReservation) {
+ // TODO(eholk): check with WasmAllocationTracker to make sure this is
+ // actually a buffer we are tracking.
+ isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace(
+ allocation.length);
+ }
+
isolate->array_buffer_allocator()->Free(allocation.allocation_base,
allocation.length, allocation.mode);
}
@@ -18959,7 +19040,7 @@ void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
// already been promoted.
array_buffer->set_backing_store(data);
- array_buffer->set_allocation_base(data);
+ array_buffer->set_allocation_base(allocation_base);
array_buffer->set_allocation_length(allocation_length);
if (data && !is_external) {
@@ -19179,6 +19260,14 @@ Handle<PropertyCell> PropertyCell::PrepareForValue(
details = details.set_cell_type(new_type);
cell->set_property_details(details);
+ if (new_type == PropertyCellType::kConstant ||
+ new_type == PropertyCellType::kConstantType) {
+ // Store the value now to ensure that the cell contains the constant or
+ // type information. Otherwise a subsequent store operation will turn
+ // the cell mutable.
+ cell->set_value(*value);
+ }
+
// Deopt when transitioning from a constant type.
if (!invalidate && (old_type != new_type ||
original_details.IsReadOnly() != details.IsReadOnly())) {
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 521c0e6554..93f4a4eb95 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -349,40 +349,44 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(FIXED_DOUBLE_ARRAY_TYPE) \
V(FILLER_TYPE) \
\
+ V(ACCESS_CHECK_INFO_TYPE) \
V(ACCESSOR_INFO_TYPE) \
V(ACCESSOR_PAIR_TYPE) \
- V(ACCESS_CHECK_INFO_TYPE) \
- V(INTERCEPTOR_INFO_TYPE) \
+ V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
+ V(ALLOCATION_MEMENTO_TYPE) \
+ V(ALLOCATION_SITE_TYPE) \
+ V(ASYNC_GENERATOR_REQUEST_TYPE) \
+ V(CONTEXT_EXTENSION_TYPE) \
+ V(DEBUG_INFO_TYPE) \
V(FUNCTION_TEMPLATE_INFO_TYPE) \
+ V(INTERCEPTOR_INFO_TYPE) \
+ V(MODULE_INFO_ENTRY_TYPE) \
+ V(MODULE_TYPE) \
V(OBJECT_TEMPLATE_INFO_TYPE) \
- V(ALLOCATION_SITE_TYPE) \
- V(ALLOCATION_MEMENTO_TYPE) \
- V(SCRIPT_TYPE) \
- V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
- V(PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE) \
V(PROMISE_REACTION_JOB_INFO_TYPE) \
- V(DEBUG_INFO_TYPE) \
- V(STACK_FRAME_INFO_TYPE) \
+ V(PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE) \
V(PROTOTYPE_INFO_TYPE) \
+ V(SCRIPT_TYPE) \
+ V(STACK_FRAME_INFO_TYPE) \
V(TUPLE2_TYPE) \
V(TUPLE3_TYPE) \
- V(CONTEXT_EXTENSION_TYPE) \
- V(MODULE_TYPE) \
- V(MODULE_INFO_ENTRY_TYPE) \
- V(ASYNC_GENERATOR_REQUEST_TYPE) \
+ \
V(FIXED_ARRAY_TYPE) \
- V(HASH_TABLE_TYPE) \
V(DESCRIPTOR_ARRAY_TYPE) \
+ V(HASH_TABLE_TYPE) \
V(TRANSITION_ARRAY_TYPE) \
+ \
+ V(CELL_TYPE) \
+ V(CODE_DATA_CONTAINER_TYPE) \
V(FEEDBACK_VECTOR_TYPE) \
+ V(LOAD_HANDLER_TYPE) \
V(PROPERTY_ARRAY_TYPE) \
- V(SHARED_FUNCTION_INFO_TYPE) \
- V(CELL_TYPE) \
- V(WEAK_CELL_TYPE) \
V(PROPERTY_CELL_TYPE) \
+ V(SHARED_FUNCTION_INFO_TYPE) \
V(SMALL_ORDERED_HASH_MAP_TYPE) \
V(SMALL_ORDERED_HASH_SET_TYPE) \
- V(CODE_DATA_CONTAINER_TYPE) \
+ V(STORE_HANDLER_TYPE) \
+ V(WEAK_CELL_TYPE) \
\
V(JS_PROXY_TYPE) \
V(JS_GLOBAL_OBJECT_TYPE) \
@@ -390,32 +394,34 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_MODULE_NAMESPACE_TYPE) \
V(JS_SPECIAL_API_OBJECT_TYPE) \
V(JS_VALUE_TYPE) \
- V(JS_MESSAGE_OBJECT_TYPE) \
- V(JS_DATE_TYPE) \
V(JS_API_OBJECT_TYPE) \
V(JS_OBJECT_TYPE) \
+ \
V(JS_ARGUMENTS_TYPE) \
+ V(JS_ARRAY_BUFFER_TYPE) \
+ V(JS_ARRAY_TYPE) \
+ V(JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \
+ V(JS_ASYNC_GENERATOR_OBJECT_TYPE) \
V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
+ V(JS_DATE_TYPE) \
+ V(JS_ERROR_TYPE) \
V(JS_GENERATOR_OBJECT_TYPE) \
- V(JS_ASYNC_GENERATOR_OBJECT_TYPE) \
- V(JS_ARRAY_TYPE) \
- V(JS_ARRAY_BUFFER_TYPE) \
- V(JS_TYPED_ARRAY_TYPE) \
- V(JS_DATA_VIEW_TYPE) \
- V(JS_SET_TYPE) \
V(JS_MAP_TYPE) \
- V(JS_SET_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_SET_VALUE_ITERATOR_TYPE) \
V(JS_MAP_KEY_ITERATOR_TYPE) \
V(JS_MAP_KEY_VALUE_ITERATOR_TYPE) \
V(JS_MAP_VALUE_ITERATOR_TYPE) \
- V(JS_WEAK_MAP_TYPE) \
- V(JS_WEAK_SET_TYPE) \
+ V(JS_MESSAGE_OBJECT_TYPE) \
V(JS_PROMISE_TYPE) \
V(JS_REGEXP_TYPE) \
- V(JS_ERROR_TYPE) \
- V(JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \
+ V(JS_SET_TYPE) \
+ V(JS_SET_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_SET_VALUE_ITERATOR_TYPE) \
V(JS_STRING_ITERATOR_TYPE) \
+ V(JS_WEAK_MAP_TYPE) \
+ V(JS_WEAK_SET_TYPE) \
+ \
+ V(JS_TYPED_ARRAY_TYPE) \
+ V(JS_DATA_VIEW_TYPE) \
\
ARRAY_ITERATOR_TYPE_LIST(V) \
\
@@ -531,29 +537,38 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
// manually.
#define STRUCT_LIST(V) \
+ V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
V(ACCESSOR_INFO, AccessorInfo, accessor_info) \
V(ACCESSOR_PAIR, AccessorPair, accessor_pair) \
- V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
- V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
+ V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry) \
+ V(ALLOCATION_MEMENTO, AllocationMemento, allocation_memento) \
+ V(ALLOCATION_SITE, AllocationSite, allocation_site) \
+ V(ASYNC_GENERATOR_REQUEST, AsyncGeneratorRequest, async_generator_request) \
+ V(CONTEXT_EXTENSION, ContextExtension, context_extension) \
+ V(DEBUG_INFO, DebugInfo, debug_info) \
V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \
+ V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
+ V(MODULE_INFO_ENTRY, ModuleInfoEntry, module_info_entry) \
+ V(MODULE, Module, module) \
V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
- V(ALLOCATION_SITE, AllocationSite, allocation_site) \
- V(ALLOCATION_MEMENTO, AllocationMemento, allocation_memento) \
- V(SCRIPT, Script, script) \
- V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry) \
- V(PROMISE_RESOLVE_THENABLE_JOB_INFO, PromiseResolveThenableJobInfo, \
- promise_resolve_thenable_job_info) \
V(PROMISE_REACTION_JOB_INFO, PromiseReactionJobInfo, \
promise_reaction_job_info) \
- V(DEBUG_INFO, DebugInfo, debug_info) \
- V(STACK_FRAME_INFO, StackFrameInfo, stack_frame_info) \
+ V(PROMISE_RESOLVE_THENABLE_JOB_INFO, PromiseResolveThenableJobInfo, \
+ promise_resolve_thenable_job_info) \
V(PROTOTYPE_INFO, PrototypeInfo, prototype_info) \
+ V(SCRIPT, Script, script) \
+ V(STACK_FRAME_INFO, StackFrameInfo, stack_frame_info) \
V(TUPLE2, Tuple2, tuple2) \
- V(TUPLE3, Tuple3, tuple3) \
- V(CONTEXT_EXTENSION, ContextExtension, context_extension) \
- V(MODULE, Module, module) \
- V(MODULE_INFO_ENTRY, ModuleInfoEntry, module_info_entry) \
- V(ASYNC_GENERATOR_REQUEST, AsyncGeneratorRequest, async_generator_request)
+ V(TUPLE3, Tuple3, tuple3)
+
+#define DATA_HANDLER_LIST(V) \
+ V(LOAD_HANDLER, LoadHandler, 1, load_handler1) \
+ V(LOAD_HANDLER, LoadHandler, 2, load_handler2) \
+ V(LOAD_HANDLER, LoadHandler, 3, load_handler3) \
+ V(STORE_HANDLER, StoreHandler, 0, store_handler0) \
+ V(STORE_HANDLER, StoreHandler, 1, store_handler1) \
+ V(STORE_HANDLER, StoreHandler, 2, store_handler2) \
+ V(STORE_HANDLER, StoreHandler, 3, store_handler3)
// We use the full 16 bits of the instance_type field to encode heap object
// instance types. All the high-order bits (bit 7-15) are cleared if the object
@@ -704,80 +719,90 @@ enum InstanceType : uint16_t {
FILLER_TYPE, // LAST_DATA_TYPE
// Structs.
+ ACCESS_CHECK_INFO_TYPE,
ACCESSOR_INFO_TYPE,
ACCESSOR_PAIR_TYPE,
- ACCESS_CHECK_INFO_TYPE,
- INTERCEPTOR_INFO_TYPE,
+ ALIASED_ARGUMENTS_ENTRY_TYPE,
+ ALLOCATION_MEMENTO_TYPE,
+ ALLOCATION_SITE_TYPE,
+ ASYNC_GENERATOR_REQUEST_TYPE,
+ CONTEXT_EXTENSION_TYPE,
+ DEBUG_INFO_TYPE,
FUNCTION_TEMPLATE_INFO_TYPE,
+ INTERCEPTOR_INFO_TYPE,
+ MODULE_INFO_ENTRY_TYPE,
+ MODULE_TYPE,
OBJECT_TEMPLATE_INFO_TYPE,
- ALLOCATION_SITE_TYPE,
- ALLOCATION_MEMENTO_TYPE,
- SCRIPT_TYPE,
- ALIASED_ARGUMENTS_ENTRY_TYPE,
- PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE,
PROMISE_REACTION_JOB_INFO_TYPE,
- DEBUG_INFO_TYPE,
- STACK_FRAME_INFO_TYPE,
+ PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE,
PROTOTYPE_INFO_TYPE,
+ SCRIPT_TYPE,
+ STACK_FRAME_INFO_TYPE,
TUPLE2_TYPE,
TUPLE3_TYPE,
- CONTEXT_EXTENSION_TYPE,
- MODULE_TYPE,
- MODULE_INFO_ENTRY_TYPE,
- ASYNC_GENERATOR_REQUEST_TYPE,
+
+ // FixedArrays.
FIXED_ARRAY_TYPE, // FIRST_FIXED_ARRAY_TYPE
- HASH_TABLE_TYPE,
DESCRIPTOR_ARRAY_TYPE,
+ HASH_TABLE_TYPE,
TRANSITION_ARRAY_TYPE, // LAST_FIXED_ARRAY_TYPE
+
+ // Misc.
+ CELL_TYPE,
+ CODE_DATA_CONTAINER_TYPE,
FEEDBACK_VECTOR_TYPE,
+ LOAD_HANDLER_TYPE,
PROPERTY_ARRAY_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
- CELL_TYPE,
- WEAK_CELL_TYPE,
PROPERTY_CELL_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
SMALL_ORDERED_HASH_MAP_TYPE,
SMALL_ORDERED_HASH_SET_TYPE,
- CODE_DATA_CONTAINER_TYPE,
+ STORE_HANDLER_TYPE,
+ WEAK_CELL_TYPE,
// All the following types are subtypes of JSReceiver, which corresponds to
// objects in the JS sense. The first and the last type in this range are
// the two forms of function. This organization enables using the same
// compares for checking the JS_RECEIVER and the NONCALLABLE_JS_OBJECT range.
- JS_PROXY_TYPE, // FIRST_JS_RECEIVER_TYPE
- JS_GLOBAL_OBJECT_TYPE, // FIRST_JS_OBJECT_TYPE
+ // Some of the following instance types are exposed in v8.h, so to not
+ // unnecessarily change the ABI when we introduce new instance types in the
+ // future, we leave some space between instance types.
+ JS_PROXY_TYPE = 0x0400, // FIRST_JS_RECEIVER_TYPE
+ JS_GLOBAL_OBJECT_TYPE, // FIRST_JS_OBJECT_TYPE
JS_GLOBAL_PROXY_TYPE,
JS_MODULE_NAMESPACE_TYPE,
// Like JS_API_OBJECT_TYPE, but requires access checks and/or has
// interceptors.
- JS_SPECIAL_API_OBJECT_TYPE, // LAST_SPECIAL_RECEIVER_TYPE
- JS_VALUE_TYPE, // LAST_CUSTOM_ELEMENTS_RECEIVER
- JS_MESSAGE_OBJECT_TYPE,
- JS_DATE_TYPE,
+ JS_SPECIAL_API_OBJECT_TYPE = 0x0410, // LAST_SPECIAL_RECEIVER_TYPE
+ JS_VALUE_TYPE, // LAST_CUSTOM_ELEMENTS_RECEIVER
// Like JS_OBJECT_TYPE, but created from API function.
- JS_API_OBJECT_TYPE,
+ JS_API_OBJECT_TYPE = 0x0420,
JS_OBJECT_TYPE,
JS_ARGUMENTS_TYPE,
+ JS_ARRAY_BUFFER_TYPE,
+ JS_ARRAY_TYPE,
+ JS_ASYNC_FROM_SYNC_ITERATOR_TYPE,
+ JS_ASYNC_GENERATOR_OBJECT_TYPE,
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
+ JS_DATE_TYPE,
+ JS_ERROR_TYPE,
JS_GENERATOR_OBJECT_TYPE,
- JS_ASYNC_GENERATOR_OBJECT_TYPE,
- JS_ARRAY_TYPE,
- JS_ARRAY_BUFFER_TYPE,
- JS_TYPED_ARRAY_TYPE,
- JS_DATA_VIEW_TYPE,
- JS_SET_TYPE,
JS_MAP_TYPE,
- JS_SET_KEY_VALUE_ITERATOR_TYPE,
- JS_SET_VALUE_ITERATOR_TYPE,
JS_MAP_KEY_ITERATOR_TYPE,
JS_MAP_KEY_VALUE_ITERATOR_TYPE,
JS_MAP_VALUE_ITERATOR_TYPE,
- JS_WEAK_MAP_TYPE,
- JS_WEAK_SET_TYPE,
+ JS_MESSAGE_OBJECT_TYPE,
JS_PROMISE_TYPE,
JS_REGEXP_TYPE,
- JS_ERROR_TYPE,
- JS_ASYNC_FROM_SYNC_ITERATOR_TYPE,
+ JS_SET_TYPE,
+ JS_SET_KEY_VALUE_ITERATOR_TYPE,
+ JS_SET_VALUE_ITERATOR_TYPE,
JS_STRING_ITERATOR_TYPE,
+ JS_WEAK_MAP_TYPE,
+ JS_WEAK_SET_TYPE,
+
+ JS_TYPED_ARRAY_TYPE,
+ JS_DATA_VIEW_TYPE,
#define ARRAY_ITERATOR_TYPE(type) type,
ARRAY_ITERATOR_TYPE_LIST(ARRAY_ITERATOR_TYPE)
@@ -857,55 +882,6 @@ STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
InstanceType instance_type);
-#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V) \
- V(BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE) \
- V(BYTECODE_ARRAY_HANDLER_TABLE_SUB_TYPE) \
- V(CODE_STUBS_TABLE_SUB_TYPE) \
- V(COMPILATION_CACHE_TABLE_SUB_TYPE) \
- V(CONTEXT_SUB_TYPE) \
- V(COPY_ON_WRITE_SUB_TYPE) \
- V(DEOPTIMIZATION_DATA_SUB_TYPE) \
- V(DESCRIPTOR_ARRAY_SUB_TYPE) \
- V(EMBEDDED_OBJECT_SUB_TYPE) \
- V(ENUM_CACHE_SUB_TYPE) \
- V(ENUM_INDICES_CACHE_SUB_TYPE) \
- V(DEPENDENT_CODE_SUB_TYPE) \
- V(DICTIONARY_ELEMENTS_SUB_TYPE) \
- V(DICTIONARY_PROPERTIES_SUB_TYPE) \
- V(EMPTY_PROPERTIES_DICTIONARY_SUB_TYPE) \
- V(PACKED_ELEMENTS_SUB_TYPE) \
- V(FAST_PROPERTIES_SUB_TYPE) \
- V(FAST_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE) \
- V(HANDLER_TABLE_SUB_TYPE) \
- V(JS_COLLECTION_SUB_TYPE) \
- V(JS_WEAK_COLLECTION_SUB_TYPE) \
- V(NOSCRIPT_SHARED_FUNCTION_INFOS_SUB_TYPE) \
- V(NUMBER_STRING_CACHE_SUB_TYPE) \
- V(OBJECT_TO_CODE_SUB_TYPE) \
- V(OPTIMIZED_CODE_LITERALS_SUB_TYPE) \
- V(OPTIMIZED_CODE_MAP_SUB_TYPE) \
- V(PROTOTYPE_USERS_SUB_TYPE) \
- V(REGEXP_MULTIPLE_CACHE_SUB_TYPE) \
- V(RETAINED_MAPS_SUB_TYPE) \
- V(SCOPE_INFO_SUB_TYPE) \
- V(SCRIPT_LIST_SUB_TYPE) \
- V(SERIALIZED_TEMPLATES_SUB_TYPE) \
- V(SHARED_FUNCTION_INFOS_SUB_TYPE) \
- V(SINGLE_CHARACTER_STRING_CACHE_SUB_TYPE) \
- V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE) \
- V(STRING_SPLIT_CACHE_SUB_TYPE) \
- V(STRING_TABLE_SUB_TYPE) \
- V(TEMPLATE_INFO_SUB_TYPE) \
- V(FEEDBACK_METADATA_SUB_TYPE) \
- V(WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE)
-
-enum FixedArraySubInstanceType {
-#define DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE(name) name,
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE)
-#undef DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE
- LAST_FIXED_ARRAY_SUB_TYPE = WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE
-};
-
// Result of an abstract relational comparison of x and y, implemented according
// to ES6 section 7.2.11 Abstract Relational Comparison.
enum class ComparisonResult {
@@ -921,6 +897,7 @@ bool ComparisonResultToBool(Operation op, ComparisonResult result);
class AbstractCode;
class AccessorPair;
class AllocationSite;
+class ByteArray;
class Cell;
class ConsString;
class DependentCode;
@@ -944,7 +921,6 @@ class RootVisitor;
class SafepointEntry;
class SharedFunctionInfo;
class StringStream;
-class TypeFeedbackInfo;
class FeedbackMetadata;
class FeedbackVector;
class WeakCell;
@@ -994,6 +970,7 @@ template <class C> inline bool Is(Object* obj);
V(Constructor) \
V(Context) \
V(CoverageInfo) \
+ V(DataHandler) \
V(DeoptimizationData) \
V(DependentCode) \
V(DescriptorArray) \
@@ -1062,6 +1039,7 @@ template <class C> inline bool Is(Object* obj);
V(JSWeakCollection) \
V(JSWeakMap) \
V(JSWeakSet) \
+ V(LoadHandler) \
V(Map) \
V(MapCache) \
V(ModuleInfo) \
@@ -1093,6 +1071,7 @@ template <class C> inline bool Is(Object* obj);
V(SmallOrderedHashMap) \
V(SmallOrderedHashSet) \
V(SourcePositionTableWithFrameCache) \
+ V(StoreHandler) \
V(String) \
V(StringSet) \
V(StringTable) \
@@ -1105,7 +1084,6 @@ template <class C> inline bool Is(Object* obj);
V(TemplateObjectDescription) \
V(ThinString) \
V(TransitionArray) \
- V(TypeFeedbackInfo) \
V(Undetectable) \
V(UniqueName) \
V(WasmInstanceObject) \
@@ -2204,10 +2182,12 @@ class JSReceiver: public HeapObject {
Handle<JSReceiver> object);
MUST_USE_RESULT static MaybeHandle<FixedArray> GetOwnValues(
- Handle<JSReceiver> object, PropertyFilter filter);
+ Handle<JSReceiver> object, PropertyFilter filter,
+ bool try_fast_path = true);
MUST_USE_RESULT static MaybeHandle<FixedArray> GetOwnEntries(
- Handle<JSReceiver> object, PropertyFilter filter);
+ Handle<JSReceiver> object, PropertyFilter filter,
+ bool try_fast_path = true);
static const int kHashMask = PropertyArray::HashField::kMask;
@@ -2391,7 +2371,8 @@ class JSObject: public JSReceiver {
Handle<Map> new_map,
Isolate* isolate);
static bool UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate);
- static void InvalidatePrototypeChains(Map* map);
+ static Map* InvalidatePrototypeChains(Map* map);
+ static void InvalidatePrototypeValidityCell(JSGlobalObject* global);
// Updates prototype chain tracking information when an object changes its
// map from |old_map| to |new_map|.
@@ -2691,6 +2672,7 @@ class JSObject: public JSReceiver {
STATIC_ASSERT(kHeaderSize == Internals::kJSObjectHeaderSize);
static const int kMaxInObjectProperties =
(kMaxInstanceSize - kHeaderSize) >> kPointerSizeLog2;
+ STATIC_ASSERT(kMaxInObjectProperties <= kMaxNumberOfDescriptors);
class BodyDescriptor;
// No weak fields.
@@ -2801,414 +2783,6 @@ class JSIteratorResult: public JSObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(JSIteratorResult);
};
-
-// Common superclass for FixedArrays that allow implementations to share
-// common accessors and some code paths.
-class FixedArrayBase: public HeapObject {
- public:
- // [length]: length of the array.
- inline int length() const;
- inline void set_length(int value);
-
- // Get and set the length using acquire loads and release stores.
- inline int synchronized_length() const;
- inline void synchronized_set_length(int value);
-
- DECL_CAST(FixedArrayBase)
-
- static int GetMaxLengthForNewSpaceAllocation(ElementsKind kind);
-
- bool IsCowArray() const;
-
- // Layout description.
- // Length is smi tagged when it is stored.
- static const int kLengthOffset = HeapObject::kHeaderSize;
- static const int kHeaderSize = kLengthOffset + kPointerSize;
-};
-
-
-class FixedDoubleArray;
-class IncrementalMarking;
-
-
-// FixedArray describes fixed-sized arrays with element type Object*.
-class FixedArray: public FixedArrayBase {
- public:
- // Setter and getter for elements.
- inline Object* get(int index) const;
- static inline Handle<Object> get(FixedArray* array, int index,
- Isolate* isolate);
- template <class T>
- MaybeHandle<T> GetValue(Isolate* isolate, int index) const;
-
- template <class T>
- Handle<T> GetValueChecked(Isolate* isolate, int index) const;
-
- // Return a grown copy if the index is bigger than the array's length.
- static Handle<FixedArray> SetAndGrow(Handle<FixedArray> array, int index,
- Handle<Object> value);
-
- // Setter that uses write barrier.
- inline void set(int index, Object* value);
- inline bool is_the_hole(Isolate* isolate, int index);
-
- // Setter that doesn't need write barrier.
- inline void set(int index, Smi* value);
- // Setter with explicit barrier mode.
- inline void set(int index, Object* value, WriteBarrierMode mode);
-
- // Setters for frequently used oddballs located in old space.
- inline void set_undefined(int index);
- inline void set_undefined(Isolate* isolate, int index);
- inline void set_null(int index);
- inline void set_null(Isolate* isolate, int index);
- inline void set_the_hole(int index);
- inline void set_the_hole(Isolate* isolate, int index);
-
- inline Object** GetFirstElementAddress();
- inline bool ContainsOnlySmisOrHoles();
-
- // Gives access to raw memory which stores the array's data.
- inline Object** data_start();
-
- inline void FillWithHoles(int from, int to);
-
- // Shrink length and insert filler objects.
- void Shrink(int length);
-
- // Copy a sub array from the receiver to dest.
- void CopyTo(int pos, FixedArray* dest, int dest_pos, int len) const;
-
- // Garbage collection support.
- static constexpr int SizeFor(int length) {
- return kHeaderSize + length * kPointerSize;
- }
-
- // Code Generation support.
- static constexpr int OffsetOfElementAt(int index) { return SizeFor(index); }
-
- // Garbage collection support.
- inline Object** RawFieldOfElementAt(int index);
-
- DECL_CAST(FixedArray)
-
- // Maximal allowed size, in bytes, of a single FixedArray.
- // Prevents overflowing size computations, as well as extreme memory
- // consumption.
- static const int kMaxSize = 128 * MB * kPointerSize;
- // Maximally allowed length of a FixedArray.
- static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
- // Maximally allowed length for regular (non large object space) object.
- STATIC_ASSERT(kMaxRegularHeapObjectSize < kMaxSize);
- static const int kMaxRegularLength =
- (kMaxRegularHeapObjectSize - kHeaderSize) / kPointerSize;
-
- // Dispatched behavior.
- DECL_PRINTER(FixedArray)
- DECL_VERIFIER(FixedArray)
-#ifdef DEBUG
- // Checks if two FixedArrays have identical contents.
- bool IsEqualTo(FixedArray* other);
-#endif
-
- typedef FlexibleBodyDescriptor<kHeaderSize> BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
-
- protected:
- // Set operation on FixedArray without using write barriers. Can
- // only be used for storing old space objects or smis.
- static inline void NoWriteBarrierSet(FixedArray* array,
- int index,
- Object* value);
-
- private:
- STATIC_ASSERT(kHeaderSize == Internals::kFixedArrayHeaderSize);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
-};
-
-// FixedArray alias added only because of IsFixedArrayExact() predicate, which
-// checks for the exact instance type FIXED_ARRAY_TYPE instead of a range
-// check: [FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE].
-class FixedArrayExact final : public FixedArray {
- public:
- DECL_CAST(FixedArrayExact)
-};
-
-// FixedDoubleArray describes fixed-sized arrays with element type double.
-class FixedDoubleArray: public FixedArrayBase {
- public:
- // Setter and getter for elements.
- inline double get_scalar(int index);
- inline uint64_t get_representation(int index);
- static inline Handle<Object> get(FixedDoubleArray* array, int index,
- Isolate* isolate);
- inline void set(int index, double value);
- inline void set_the_hole(Isolate* isolate, int index);
- inline void set_the_hole(int index);
-
- // Checking for the hole.
- inline bool is_the_hole(Isolate* isolate, int index);
- inline bool is_the_hole(int index);
-
- // Garbage collection support.
- inline static int SizeFor(int length) {
- return kHeaderSize + length * kDoubleSize;
- }
-
- // Gives access to raw memory which stores the array's data.
- inline double* data_start();
-
- inline void FillWithHoles(int from, int to);
-
- // Code Generation support.
- static int OffsetOfElementAt(int index) { return SizeFor(index); }
-
- DECL_CAST(FixedDoubleArray)
-
- // Maximal allowed size, in bytes, of a single FixedDoubleArray.
- // Prevents overflowing size computations, as well as extreme memory
- // consumption.
- static const int kMaxSize = 512 * MB;
- // Maximum allowed length of a FixedDoubleArray.
- static const int kMaxLength = (kMaxSize - kHeaderSize) / kDoubleSize;
-
- // Dispatched behavior.
- DECL_PRINTER(FixedDoubleArray)
- DECL_VERIFIER(FixedDoubleArray)
-
- class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(FixedDoubleArray);
-};
-
-class WeakFixedArray : public FixedArray {
- public:
- // If |maybe_array| is not a WeakFixedArray, a fresh one will be allocated.
- // This function does not check whether the value already exists; callers
- // must ensure this themselves if necessary.
- static Handle<WeakFixedArray> Add(Handle<Object> maybe_array,
- Handle<HeapObject> value,
- int* assigned_index = nullptr);
-
- // Returns true if an entry was found and removed.
- bool Remove(Handle<HeapObject> value);
-
- class NullCallback {
- public:
- static void Callback(Object* value, int old_index, int new_index) {}
- };
-
- template <class CompactionCallback>
- void Compact();
-
- inline Object* Get(int index) const;
- inline void Clear(int index);
- inline int Length() const;
-
- inline bool IsEmptySlot(int index) const;
- static Object* Empty() { return Smi::kZero; }
-
- class Iterator {
- public:
- explicit Iterator(Object* maybe_array) : list_(nullptr) {
- Reset(maybe_array);
- }
- void Reset(Object* maybe_array);
-
- template <class T>
- inline T* Next();
-
- private:
- int index_;
- WeakFixedArray* list_;
-#ifdef DEBUG
- int last_used_index_;
- DisallowHeapAllocation no_gc_;
-#endif // DEBUG
- DISALLOW_COPY_AND_ASSIGN(Iterator);
- };
-
- DECL_CAST(WeakFixedArray)
-
- private:
- static const int kLastUsedIndexIndex = 0;
- static const int kFirstIndex = 1;
-
- static Handle<WeakFixedArray> Allocate(
- Isolate* isolate, int size, Handle<WeakFixedArray> initialize_from);
-
- static void Set(Handle<WeakFixedArray> array, int index,
- Handle<HeapObject> value);
- inline void clear(int index);
-
- inline int last_used_index() const;
- inline void set_last_used_index(int index);
-
- // Disallow inherited setters.
- void set(int index, Smi* value);
- void set(int index, Object* value);
- void set(int index, Object* value, WriteBarrierMode mode);
- DISALLOW_IMPLICIT_CONSTRUCTORS(WeakFixedArray);
-};
-
-// Generic array grows dynamically with O(1) amortized insertion.
-//
-// ArrayList is a FixedArray with static convenience methods for adding more
-// elements. The Length() method returns the number of elements in the list, not
-// the allocated size. The number of elements is stored at kLengthIndex and is
-// updated with every insertion. The elements of the ArrayList are stored in the
-// underlying FixedArray starting at kFirstIndex.
-class ArrayList : public FixedArray {
- public:
- enum AddMode {
- kNone,
- // Use this if GC can delete elements from the array.
- kReloadLengthAfterAllocation,
- };
- static Handle<ArrayList> Add(Handle<ArrayList> array, Handle<Object> obj,
- AddMode mode = kNone);
- static Handle<ArrayList> Add(Handle<ArrayList> array, Handle<Object> obj1,
- Handle<Object> obj2, AddMode = kNone);
- static Handle<ArrayList> New(Isolate* isolate, int size);
-
- // Returns the number of elements in the list, not the allocated size, which
- // is length(). Lower-case length() and upper-case Length() return different
- // results!
- inline int Length() const;
-
- // Sets the Length() as used by Elements(). Does not change the underlying
- // storage capacity, i.e., length().
- inline void SetLength(int length);
- inline Object* Get(int index) const;
- inline Object** Slot(int index);
-
- // Set the element at index to obj. The underlying array must be large enough.
- // If you need to grow the ArrayList, use the static Add() methods instead.
- inline void Set(int index, Object* obj,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
- // Set the element at index to undefined. This does not change the Length().
- inline void Clear(int index, Object* undefined);
-
- // Return a copy of the list of size Length() without the first entry. The
- // number returned by Length() is stored in the first entry.
- static Handle<FixedArray> Elements(Handle<ArrayList> array);
- bool IsFull();
- DECL_CAST(ArrayList)
-
- private:
- static Handle<ArrayList> EnsureSpace(Handle<ArrayList> array, int length);
- static const int kLengthIndex = 0;
- static const int kFirstIndex = 1;
- DISALLOW_IMPLICIT_CONSTRUCTORS(ArrayList);
-};
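As the class comment says, the element count lives in slot kLengthIndex and the payload starts at kFirstIndex, with growth handled by the static Add() helpers. A toy analogue of that length-prefixed layout, backed by std::vector instead of a heap-allocated FixedArray (ToyArrayList is illustrative, not the V8 API):

#include <vector>

// Toy analogue of ArrayList: slot 0 holds the element count, slots 1..capacity
// hold the payload. Doubling the backing store on overflow gives the O(1)
// amortized insertion the class comment describes.
class ToyArrayList {
 public:
  int Length() const { return store_.empty() ? 0 : store_[0]; }
  void Add(int value) {
    int n = Length();
    if (n + 1 >= static_cast<int>(store_.size())) {
      store_.resize(store_.empty() ? 4 : 2 * store_.size(), 0);
    }
    store_[1 + n] = value;
    store_[0] = n + 1;
  }
  int Get(int index) const { return store_[1 + index]; }

 private:
  std::vector<int> store_;  // stand-in for the underlying FixedArray
};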
-
-enum SearchMode { ALL_ENTRIES, VALID_ENTRIES };
-
-template <SearchMode search_mode, typename T>
-inline int Search(T* array, Name* name, int valid_entries = 0,
- int* out_insertion_index = nullptr);
-
-// ByteArray represents fixed sized byte arrays. Used for the relocation info
-// that is attached to code objects.
-class ByteArray: public FixedArrayBase {
- public:
- inline int Size();
-
- // Setter and getter.
- inline byte get(int index) const;
- inline void set(int index, byte value);
-
- // Copy in / copy out whole byte slices.
- inline void copy_out(int index, byte* buffer, int length);
- inline void copy_in(int index, const byte* buffer, int length);
-
- // Treat contents as an int array.
- inline int get_int(int index) const;
- inline void set_int(int index, int value);
-
- inline uint32_t get_uint32(int index) const;
- inline void set_uint32(int index, uint32_t value);
-
- // Clear uninitialized padding space. This ensures that the snapshot content
- // is deterministic.
- inline void clear_padding();
-
- static int SizeFor(int length) {
- return OBJECT_POINTER_ALIGN(kHeaderSize + length);
- }
- // We use byte arrays for free blocks in the heap. Given a desired size in
- // bytes that is a multiple of the word size and big enough to hold a byte
- // array, this function returns the number of elements a byte array should
- // have.
- static int LengthFor(int size_in_bytes) {
- DCHECK(IsAligned(size_in_bytes, kPointerSize));
- DCHECK_GE(size_in_bytes, kHeaderSize);
- return size_in_bytes - kHeaderSize;
- }
-
- // Returns data start address.
- inline Address GetDataStartAddress();
-
- inline int DataSize() const;
-
- // Returns a pointer to the ByteArray object for a given data start address.
- static inline ByteArray* FromDataStartAddress(Address address);
-
- DECL_CAST(ByteArray)
-
- // Dispatched behavior.
- inline int ByteArraySize();
- DECL_PRINTER(ByteArray)
- DECL_VERIFIER(ByteArray)
-
- // Layout description.
- static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
-
- // Maximal memory consumption for a single ByteArray.
- static const int kMaxSize = 512 * MB;
- // Maximal length of a single ByteArray.
- static const int kMaxLength = kMaxSize - kHeaderSize;
-
- class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
-};
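SizeFor() and LengthFor() are intended to be inverses for word-aligned block sizes, which is what makes ByteArray usable for free blocks in the heap. A small check of that round trip, under assumed header and pointer sizes:

#include <cassert>

// Assumed values for illustration; V8 derives these from the target platform.
constexpr int kPointerSize = 8;
constexpr int kHeaderSize = 16;
constexpr int ObjectPointerAlign(int value) {
  return (value + kPointerSize - 1) & ~(kPointerSize - 1);
}
constexpr int SizeFor(int length) { return ObjectPointerAlign(kHeaderSize + length); }
constexpr int LengthFor(int size_in_bytes) { return size_in_bytes - kHeaderSize; }

int main() {
  // For any word-aligned size big enough to hold a ByteArray,
  // LengthFor() undoes SizeFor().
  for (int size = kHeaderSize; size <= 256; size += kPointerSize) {
    assert(SizeFor(LengthFor(size)) == size);
  }
}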
-
-// Wrapper class for ByteArray which can store arbitrary C++ classes, as long
-// as they can be copied with memcpy.
-template <class T>
-class PodArray : public ByteArray {
- public:
- static Handle<PodArray<T>> New(Isolate* isolate, int length,
- PretenureFlag pretenure = NOT_TENURED);
- void copy_out(int index, T* result) {
- ByteArray::copy_out(index * sizeof(T), reinterpret_cast<byte*>(result),
- sizeof(T));
- }
- T get(int index) {
- T result;
- copy_out(index, &result);
- return result;
- }
- void set(int index, const T& value) {
- copy_in(index * sizeof(T), reinterpret_cast<const byte*>(&value),
- sizeof(T));
- }
- int length() { return ByteArray::length() / sizeof(T); }
- DECL_CAST(PodArray<T>)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PodArray<T>);
-};
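PodArray scales indices by sizeof(T) and copies elements with memcpy, so any trivially copyable struct can be stored in the underlying ByteArray. A standalone sketch of the same scaling over a plain byte buffer (ToyPodArray is hypothetical, not a V8 type):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

struct Point { int32_t x; int32_t y; };  // any trivially copyable T works

// Element i occupies bytes [i * sizeof(T), (i + 1) * sizeof(T)).
template <class T>
class ToyPodArray {
 public:
  explicit ToyPodArray(int length) : bytes_(length * sizeof(T)) {}
  void set(int index, const T& value) {
    std::memcpy(bytes_.data() + index * sizeof(T), &value, sizeof(T));
  }
  T get(int index) const {
    T result;
    std::memcpy(&result, bytes_.data() + index * sizeof(T), sizeof(T));
    return result;
  }
  int length() const { return static_cast<int>(bytes_.size() / sizeof(T)); }

 private:
  std::vector<unsigned char> bytes_;
};

int main() {
  ToyPodArray<Point> a(2);
  a.set(1, Point{3, 4});
  assert(a.get(1).x == 3 && a.get(1).y == 4);
}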
-
// FreeSpace are fixed-size free memory blocks used by the heap and GC.
// They look like heap objects (are heap object tagged and have a map) so that
// the heap remains iterable. They have a size and a next pointer.
@@ -3245,136 +2819,6 @@ class FreeSpace: public HeapObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
};
-
-// V has parameters (Type, type, TYPE, C type, element_size)
-#define TYPED_ARRAYS(V) \
- V(Uint8, uint8, UINT8, uint8_t, 1) \
- V(Int8, int8, INT8, int8_t, 1) \
- V(Uint16, uint16, UINT16, uint16_t, 2) \
- V(Int16, int16, INT16, int16_t, 2) \
- V(Uint32, uint32, UINT32, uint32_t, 4) \
- V(Int32, int32, INT32, int32_t, 4) \
- V(Float32, float32, FLOAT32, float, 4) \
- V(Float64, float64, FLOAT64, double, 8) \
- V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t, 1)
-
-
-class FixedTypedArrayBase: public FixedArrayBase {
- public:
- // [base_pointer]: Either points to the FixedTypedArrayBase itself or nullptr.
- DECL_ACCESSORS(base_pointer, Object)
-
- // [external_pointer]: Contains the offset between base_pointer and the start
- // of the data. If the base_pointer is a nullptr, the external_pointer
- // therefore points to the actual backing store.
- DECL_ACCESSORS(external_pointer, void)
-
- // Dispatched behavior.
- DECL_CAST(FixedTypedArrayBase)
-
- static const int kBasePointerOffset = FixedArrayBase::kHeaderSize;
- static const int kExternalPointerOffset = kBasePointerOffset + kPointerSize;
- static const int kHeaderSize =
- DOUBLE_POINTER_ALIGN(kExternalPointerOffset + kPointerSize);
-
- static const int kDataOffset = kHeaderSize;
-
- static const int kMaxElementSize = 8;
-
-#ifdef V8_HOST_ARCH_32_BIT
- static const size_t kMaxByteLength = std::numeric_limits<size_t>::max();
-#else
- static const size_t kMaxByteLength =
- static_cast<size_t>(Smi::kMaxValue) * kMaxElementSize;
-#endif // V8_HOST_ARCH_32_BIT
-
- static const size_t kMaxLength = Smi::kMaxValue;
-
- class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
-
- inline int size() const;
-
- static inline int TypedArraySize(InstanceType type, int length);
- inline int TypedArraySize(InstanceType type) const;
-
- // Use with care: returns raw pointer into heap.
- inline void* DataPtr();
-
- inline int DataSize() const;
-
- inline size_t ByteLength() const;
-
- private:
- static inline int ElementSize(InstanceType type);
-
- inline int DataSize(InstanceType type) const;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArrayBase);
-};
-
-
-template <class Traits>
-class FixedTypedArray: public FixedTypedArrayBase {
- public:
- typedef typename Traits::ElementType ElementType;
- static const InstanceType kInstanceType = Traits::kInstanceType;
-
- DECL_CAST(FixedTypedArray<Traits>)
-
- inline ElementType get_scalar(int index);
- static inline Handle<Object> get(FixedTypedArray* array, int index);
- inline void set(int index, ElementType value);
-
- static inline ElementType from(int value);
- static inline ElementType from(uint32_t value);
- static inline ElementType from(double value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- inline void SetValue(uint32_t index, Object* value);
-
- DECL_PRINTER(FixedTypedArray)
- DECL_VERIFIER(FixedTypedArray)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArray);
-};
-
-#define FIXED_TYPED_ARRAY_TRAITS(Type, type, TYPE, elementType, size) \
- STATIC_ASSERT(size <= FixedTypedArrayBase::kMaxElementSize); \
- class Type##ArrayTraits { \
- public: /* NOLINT */ \
- typedef elementType ElementType; \
- static const InstanceType kInstanceType = FIXED_##TYPE##_ARRAY_TYPE; \
- static const char* Designator() { return #type " array"; } \
- static inline Handle<Object> ToHandle(Isolate* isolate, \
- elementType scalar); \
- static inline elementType defaultValue(); \
- }; \
- \
- typedef FixedTypedArray<Type##ArrayTraits> Fixed##Type##Array;
-
-TYPED_ARRAYS(FIXED_TYPED_ARRAY_TRAITS)
-
-#undef FIXED_TYPED_ARRAY_TRAITS
-
-class TemplateList : public FixedArray {
- public:
- static Handle<TemplateList> New(Isolate* isolate, int size);
- inline int length() const;
- inline Object* get(int index) const;
- inline void set(int index, Object* value);
- static Handle<TemplateList> Add(Isolate* isolate, Handle<TemplateList> list,
- Handle<Object> value);
- DECL_CAST(TemplateList)
- private:
- static const int kLengthIndex = 0;
- static const int kFirstElementIndex = kLengthIndex + 1;
- DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateList);
-};
-
class PrototypeInfo;
// An abstract superclass, a marker class really, for simple structure classes.
@@ -3779,6 +3223,7 @@ enum BuiltinFunctionId {
kArrayKeys,
kArrayValues,
kArrayIteratorNext,
+ kBigIntConstructor,
kMapSize,
kSetSize,
kMapIteratorNext,
@@ -3795,6 +3240,8 @@ enum BuiltinFunctionId {
kGlobalUnescape,
kGlobalIsFinite,
kGlobalIsNaN,
+ kNumberConstructor,
+ kSymbolConstructor,
kTypedArrayByteLength,
kTypedArrayByteOffset,
kTypedArrayEntries,
@@ -3803,6 +3250,7 @@ enum BuiltinFunctionId {
kTypedArrayToStringTag,
kTypedArrayValues,
kSharedArrayBufferByteLength,
+ kStringConstructor,
kStringIterator,
kStringIteratorNext,
kStringToLowerCaseIntl,
@@ -4470,48 +3918,6 @@ class JSPromise : public JSObject {
STATIC_ASSERT(v8::Promise::kRejected == 2);
};
-class TypeFeedbackInfo : public Tuple3 {
- public:
- inline int ic_total_count();
- inline void set_ic_total_count(int count);
-
- inline int ic_with_type_info_count();
- inline void change_ic_with_type_info_count(int delta);
-
- inline int ic_generic_count();
- inline void change_ic_generic_count(int delta);
-
- inline void initialize_storage();
-
- inline void change_own_type_change_checksum();
- inline int own_type_change_checksum();
-
- inline void set_inlined_type_change_checksum(int checksum);
- inline bool matches_inlined_type_change_checksum(int checksum);
-
- DECL_CAST(TypeFeedbackInfo)
-
- static const int kStorage1Offset = kValue1Offset;
- static const int kStorage2Offset = kValue2Offset;
- static const int kStorage3Offset = kValue3Offset;
-
- private:
- static const int kTypeChangeChecksumBits = 7;
-
- class ICTotalCountField: public BitField<int, 0,
- kSmiValueSize - kTypeChangeChecksumBits> {}; // NOLINT
- class OwnTypeChangeChecksum: public BitField<int,
- kSmiValueSize - kTypeChangeChecksumBits,
- kTypeChangeChecksumBits> {}; // NOLINT
- class ICsWithTypeInfoCountField: public BitField<int, 0,
- kSmiValueSize - kTypeChangeChecksumBits> {}; // NOLINT
- class InlinedTypeChangeChecksum: public BitField<int,
- kSmiValueSize - kTypeChangeChecksumBits,
- kTypeChangeChecksumBits> {}; // NOLINT
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackInfo);
-};
-
class AllocationSite: public Struct {
public:
static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
@@ -4904,7 +4310,7 @@ class JSProxy: public JSReceiver {
// [handler]: The handler property.
DECL_ACCESSORS(handler, Object)
// [target]: The target property.
- DECL_ACCESSORS(target, JSReceiver)
+ DECL_ACCESSORS(target, Object)
static MaybeHandle<Context> GetFunctionRealm(Handle<JSProxy> proxy);
@@ -5015,58 +4421,23 @@ class JSProxy: public JSReceiver {
DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
};
-
-class JSCollection : public JSObject {
- public:
- // [table]: the backing hash table
- DECL_ACCESSORS(table, Object)
-
- static const int kTableOffset = JSObject::kHeaderSize;
- static const int kSize = kTableOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSCollection);
-};
-
-
-// The JSSet describes EcmaScript Harmony sets
-// TODO(marja): When moving JSSet out of objects.h, move JSSetIterator (from
-// objects/hash-table.h) into the same file.
-class JSSet : public JSCollection {
- public:
- DECL_CAST(JSSet)
-
- static void Initialize(Handle<JSSet> set, Isolate* isolate);
- static void Clear(Handle<JSSet> set);
-
- // Dispatched behavior.
- DECL_PRINTER(JSSet)
- DECL_VERIFIER(JSSet)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSSet);
-};
-
-
-// The JSMap describes EcmaScript Harmony maps
-// TODO(marja): When moving JSMap out of objects.h, move JSMapIterator (from
-// objects/hash-table.h) into the same file.
-class JSMap : public JSCollection {
+// JSProxyRevocableResult is just a JSObject with a specific initial map.
+// This initial map adds in-object properties for "proxy" and "revoke".
+// See https://tc39.github.io/ecma262/#sec-proxy.revocable
+class JSProxyRevocableResult : public JSObject {
public:
- DECL_CAST(JSMap)
-
- static void Initialize(Handle<JSMap> map, Isolate* isolate);
- static void Clear(Handle<JSMap> map);
-
- // Dispatched behavior.
- DECL_PRINTER(JSMap)
- DECL_VERIFIER(JSMap)
+ // Offsets of object fields.
+ static const int kProxyOffset = JSObject::kHeaderSize;
+ static const int kRevokeOffset = kProxyOffset + kPointerSize;
+ static const int kSize = kRevokeOffset + kPointerSize;
+ // Indices of in-object properties.
+ static const int kProxyIndex = 0;
+ static const int kRevokeIndex = 1;
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSMap);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxyRevocableResult);
};
-
// The [Async-from-Sync Iterator] object
// (proposal-async-iteration/#sec-async-from-sync-iterator-objects)
// An object which wraps an ordinary Iterator and converts it to behave
@@ -5116,82 +4487,13 @@ class JSStringIterator : public JSObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(JSStringIterator);
};
-// Base class for both JSWeakMap and JSWeakSet
-class JSWeakCollection: public JSObject {
- public:
- DECL_CAST(JSWeakCollection)
-
- // [table]: the backing hash table mapping keys to values.
- DECL_ACCESSORS(table, Object)
-
- // [next]: linked list of encountered weak maps during GC.
- DECL_ACCESSORS(next, Object)
-
- static void Initialize(Handle<JSWeakCollection> collection, Isolate* isolate);
- static void Set(Handle<JSWeakCollection> collection, Handle<Object> key,
- Handle<Object> value, int32_t hash);
- static bool Delete(Handle<JSWeakCollection> collection, Handle<Object> key,
- int32_t hash);
- static Handle<JSArray> GetEntries(Handle<JSWeakCollection> holder,
- int max_entries);
-
- static const int kTableOffset = JSObject::kHeaderSize;
- static const int kNextOffset = kTableOffset + kPointerSize;
- static const int kSize = kNextOffset + kPointerSize;
-
- // Visiting policy defines whether the table and next collection fields
- // should be visited or not.
- enum BodyVisitingPolicy { kIgnoreWeakness, kRespectWeakness };
-
- // Iterates the function object according to the visiting policy.
- template <BodyVisitingPolicy>
- class BodyDescriptorImpl;
-
- // Visit the whole object.
- typedef BodyDescriptorImpl<kIgnoreWeakness> BodyDescriptor;
-
- // Don't visit table and next collection fields.
- typedef BodyDescriptorImpl<kRespectWeakness> BodyDescriptorWeak;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakCollection);
-};
-
-
-// The JSWeakMap describes EcmaScript Harmony weak maps
-class JSWeakMap: public JSWeakCollection {
- public:
- DECL_CAST(JSWeakMap)
-
- // Dispatched behavior.
- DECL_PRINTER(JSWeakMap)
- DECL_VERIFIER(JSWeakMap)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakMap);
-};
-
-
-// The JSWeakSet describes EcmaScript Harmony weak sets
-class JSWeakSet: public JSWeakCollection {
- public:
- DECL_CAST(JSWeakSet)
-
- // Dispatched behavior.
- DECL_PRINTER(JSWeakSet)
- DECL_VERIFIER(JSWeakSet)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakSet);
-};
-
-
// Foreign describes objects pointing from JavaScript to C structures.
class Foreign: public HeapObject {
public:
// [address]: field containing the address.
inline Address foreign_address();
- inline void set_foreign_address(Address value);
+
+ static inline bool IsNormalized(Object* object);
DECL_CAST(Foreign)
@@ -5211,6 +4513,12 @@ class Foreign: public HeapObject {
typedef BodyDescriptor BodyDescriptorWeak;
private:
+ friend class Heap;
+ friend class SerializerDeserializer;
+ friend class StartupSerializer;
+
+ inline void set_foreign_address(Address value);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(Foreign);
};
@@ -5230,6 +4538,7 @@ class AccessorInfo: public Struct {
DECL_ACCESSORS(expected_receiver_type, Object)
// This directly points at a foreign C function to be used from the runtime.
DECL_ACCESSORS(getter, Object)
+ inline bool has_getter();
DECL_ACCESSORS(setter, Object)
// This either points at the same as above, or a trampoline in case we are
// running with the simulator. Use these entries from generated code.
@@ -5394,6 +4703,7 @@ class InterceptorInfo: public Struct {
DECL_BOOLEAN_ACCESSORS(can_intercept_symbols)
DECL_BOOLEAN_ACCESSORS(all_can_read)
DECL_BOOLEAN_ACCESSORS(non_masking)
+ DECL_BOOLEAN_ACCESSORS(is_named)
inline int flags() const;
inline void set_flags(int flags);
@@ -5418,6 +4728,7 @@ class InterceptorInfo: public Struct {
static const int kCanInterceptSymbolsBit = 0;
static const int kAllCanReadBit = 1;
static const int kNonMasking = 2;
+ static const int kNamed = 3;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(InterceptorInfo);
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index 85424600c0..df5f854395 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -185,7 +185,7 @@ MaybeHandle<MutableBigInt> MutableBigInt::New(Isolate* isolate, int length) {
result->set_length(length);
result->set_sign(false);
#if DEBUG
- result->InitializeDigits(length, 0xbf);
+ result->InitializeDigits(length, 0xBF);
#endif
return result;
}
@@ -304,7 +304,71 @@ MaybeHandle<BigInt> BigInt::BitwiseNot(Handle<BigInt> x) {
MaybeHandle<BigInt> BigInt::Exponentiate(Handle<BigInt> base,
Handle<BigInt> exponent) {
- UNIMPLEMENTED(); // TODO(jkummerow): Implement.
+ Isolate* isolate = base->GetIsolate();
+ // 1. If exponent is < 0, throw a RangeError exception.
+ if (exponent->sign()) {
+ THROW_NEW_ERROR(isolate,
+ NewRangeError(MessageTemplate::kBigIntNegativeExponent),
+ BigInt);
+ }
+ // 2. If base is 0n and exponent is 0n, return 1n.
+ if (exponent->is_zero()) {
+ return MutableBigInt::NewFromInt(isolate, 1);
+ }
+ // 3. Return a BigInt representing the mathematical value of base raised
+ // to the power exponent.
+ if (base->is_zero()) return base;
+ if (base->length() == 1 && base->digit(0) == 1) return base;
+ // For all bases >= 2, very large exponents would lead to unrepresentable
+ // results.
+ STATIC_ASSERT(kMaxLengthBits < std::numeric_limits<digit_t>::max());
+ if (exponent->length() > 1) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
+ BigInt);
+ }
+ digit_t exp_value = exponent->digit(0);
+ if (exp_value == 1) return base;
+ if (exp_value >= kMaxLengthBits) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
+ BigInt);
+ }
+ STATIC_ASSERT(kMaxLengthBits <= kMaxInt);
+ int n = static_cast<int>(exp_value);
+ if (base->length() == 1 && base->digit(0) == 2) {
+ // Fast path for 2^n.
+ int needed_digits = 1 + (n / kDigitBits);
+ Handle<MutableBigInt> result =
+ MutableBigInt::New(isolate, needed_digits).ToHandleChecked();
+ result->InitializeDigits(needed_digits);
+ // All bits are zero. Now set the n-th bit.
+ digit_t msd = static_cast<digit_t>(1) << (n % kDigitBits);
+ result->set_digit(needed_digits - 1, msd);
+ // Result is negative for odd powers of -2n.
+ if (base->sign()) result->set_sign((n & 1) != 0);
+ return MutableBigInt::MakeImmutable(result);
+ }
+ Handle<BigInt> result;
+ Handle<BigInt> running_square = base;
+ // This implicitly sets the result's sign correctly.
+ if (n & 1) result = base;
+ n >>= 1;
+ for (; n != 0; n >>= 1) {
+ if (!Multiply(running_square, running_square).ToHandle(&running_square)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
+ BigInt);
+ }
+ if (n & 1) {
+ if (result.is_null()) {
+ result = running_square;
+ } else {
+ if (!Multiply(result, running_square).ToHandle(&result)) {
+ THROW_NEW_ERROR(
+ isolate, NewRangeError(MessageTemplate::kBigIntTooBig), BigInt);
+ }
+ }
+ }
+ }
+ return result;
}
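The loop above is binary (square-and-multiply) exponentiation: the exponent is consumed one bit at a time, the running value is squared on every iteration, and it is multiplied into the result whenever the current bit is set. The same control flow on machine integers, as a standalone sketch (the BigInt version additionally handles sign, allocation failure, and the kBigIntTooBig limit):

#include <cassert>
#include <cstdint>

// Square-and-multiply on uint64_t; mirrors the structure of the BigInt code
// above, with overflow checking omitted.
uint64_t Pow(uint64_t base, unsigned n) {
  if (n == 0) return 1;
  uint64_t result = 1;
  uint64_t running_square = base;
  if (n & 1) result = base;
  n >>= 1;
  for (; n != 0; n >>= 1) {
    running_square *= running_square;     // square every iteration
    if (n & 1) result *= running_square;  // multiply in on set bits
  }
  return result;
}

int main() { assert(Pow(3, 5) == 243); }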
MaybeHandle<BigInt> BigInt::Multiply(Handle<BigInt> x, Handle<BigInt> y) {
@@ -1617,8 +1681,8 @@ Handle<BigInt> MutableBigInt::RightShiftByMaximum(Isolate* isolate, bool sign) {
Maybe<BigInt::digit_t> MutableBigInt::ToShiftAmount(Handle<BigIntBase> x) {
if (x->length() > 1) return Nothing<digit_t>();
digit_t value = x->digit(0);
- STATIC_ASSERT(kMaxLength * kDigitBits < std::numeric_limits<digit_t>::max());
- if (value > kMaxLength * kDigitBits) return Nothing<digit_t>();
+ STATIC_ASSERT(kMaxLengthBits < std::numeric_limits<digit_t>::max());
+ if (value > kMaxLengthBits) return Nothing<digit_t>();
return Just(value);
}
@@ -1864,12 +1928,13 @@ Handle<BigInt> BigInt::AsIntN(uint64_t n, Handle<BigInt> x) {
if (x->is_zero()) return x;
if (n == 0) return MutableBigInt::Zero(x->GetIsolate());
uint64_t needed_length = (n + kDigitBits - 1) / kDigitBits;
+ uint64_t x_length = static_cast<uint64_t>(x->length());
// If {x} has less than {n} bits, return it directly.
- if (static_cast<uint64_t>(x->length()) < needed_length) return x;
+ if (x_length < needed_length) return x;
DCHECK_LE(needed_length, kMaxInt);
digit_t top_digit = x->digit(static_cast<int>(needed_length) - 1);
digit_t compare_digit = static_cast<digit_t>(1) << ((n - 1) % kDigitBits);
- if (top_digit < compare_digit) return x;
+ if (x_length == needed_length && top_digit < compare_digit) return x;
// Otherwise we have to truncate (which is a no-op in the special case
// of x == -2^(n-1)), and determine the right sign. We also might have
// to subtract from 2^n to simulate having two's complement representation.
@@ -1946,8 +2011,11 @@ Handle<BigInt> MutableBigInt::TruncateToNBits(int n, Handle<BigInt> x) {
// The MSD might contain extra bits that we don't want.
digit_t msd = x->digit(last);
- int drop = kDigitBits - (n % kDigitBits);
- result->set_digit(last, (msd << drop) >> drop);
+ if (n % kDigitBits != 0) {
+ int drop = kDigitBits - (n % kDigitBits);
+ msd = (msd << drop) >> drop;
+ }
+ result->set_digit(last, msd);
result->set_sign(x->sign());
return MakeImmutable(result);
}
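The guard added above matters because shifting a digit by the full digit width is undefined behaviour in C++, and in that case there are no extra bits to drop anyway. A standalone sketch of the masking, assuming 64-bit digits:

#include <cassert>
#include <cstdint>

using digit_t = uint64_t;
constexpr int kDigitBits = 64;

// Keep only the low (n % kDigitBits) bits of the most significant digit, but
// leave it untouched when n is a multiple of the digit width: shifting a
// 64-bit value by 64 is undefined behaviour.
digit_t TruncateMsd(digit_t msd, int n) {
  if (n % kDigitBits != 0) {
    int drop = kDigitBits - (n % kDigitBits);
    msd = (msd << drop) >> drop;
  }
  return msd;
}

int main() {
  assert(TruncateMsd(0xFFu, 4) == 0xFu);                // keep the low 4 bits
  assert(TruncateMsd(~digit_t{0}, 64) == ~digit_t{0});  // n == 64: keep all bits
}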
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index de0daf495e..9e29a69b3b 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -24,13 +24,19 @@ class BigIntBase : public HeapObject {
return LengthBits::decode(static_cast<uint32_t>(bitfield));
}
- // The maximum length that the current implementation supports would be
- // kMaxInt / kDigitBits. However, we use a lower limit for now, because
- // raising it later is easier than lowering it.
- // Support up to 1 million bits.
- static const int kMaxLengthBits = 1024 * 1024;
+ // Increasing kMaxLength will require code changes.
+ static const int kMaxLengthBits = kMaxInt - kPointerSize * kBitsPerByte - 1;
static const int kMaxLength = kMaxLengthBits / (kPointerSize * kBitsPerByte);
+ static const int kLengthFieldBits = 30;
+ STATIC_ASSERT(kMaxLength <= ((1 << kLengthFieldBits) - 1));
+ class LengthBits : public BitField<int, 0, kLengthFieldBits> {};
+ class SignBits : public BitField<bool, LengthBits::kNext, 1> {};
+
+ static const int kBitfieldOffset = HeapObject::kHeaderSize;
+ static const int kDigitsOffset = kBitfieldOffset + kPointerSize;
+ static const int kHeaderSize = kDigitsOffset;
+
private:
friend class BigInt;
friend class MutableBigInt;
@@ -44,15 +50,6 @@ class BigIntBase : public HeapObject {
static const int kHalfDigitBits = kDigitBits / 2;
static const digit_t kHalfDigitMask = (1ull << kHalfDigitBits) - 1;
- static const int kBitfieldOffset = HeapObject::kHeaderSize;
- static const int kDigitsOffset = kBitfieldOffset + kPointerSize;
- static const int kHeaderSize = kDigitsOffset;
-
- static const int kLengthFieldBits = 20;
- STATIC_ASSERT(kMaxLength <= ((1 << kLengthFieldBits) - 1));
- class LengthBits : public BitField<int, 0, kLengthFieldBits> {};
- class SignBits : public BitField<bool, LengthBits::kNext, 1> {};
-
// sign() == true means negative.
inline bool sign() const {
intptr_t bitfield = READ_INTPTR_FIELD(this, kBitfieldOffset);
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index 17cfa4f67b..4c3e7f0d97 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -290,14 +290,14 @@ Code::Kind Code::kind() const {
void Code::initialize_flags(Kind kind, bool has_unwinding_info,
bool is_turbofanned, int stack_slots) {
- CHECK_LE(stack_slots, StackSlotsField::kMax);
- DCHECK_IMPLIES(stack_slots != 0, is_turbofanned);
+ CHECK(0 <= stack_slots && stack_slots < StackSlotsField::kMax);
static_assert(Code::NUMBER_OF_KINDS <= KindField::kMax + 1, "field overflow");
uint32_t flags = HasUnwindingInfoField::encode(has_unwinding_info) |
KindField::encode(kind) |
IsTurbofannedField::encode(is_turbofanned) |
StackSlotsField::encode(stack_slots);
WRITE_UINT32_FIELD(this, kFlagsOffset, flags);
+ DCHECK_IMPLIES(stack_slots != 0, has_safepoint_info());
}
inline bool Code::is_interpreter_trampoline_builtin() const {
@@ -411,21 +411,25 @@ void Code::set_builtin_index(int index) {
bool Code::is_builtin() const { return builtin_index() != -1; }
-unsigned Code::stack_slots() const {
- DCHECK(is_turbofanned());
+bool Code::has_safepoint_info() const {
+ return is_turbofanned() || is_wasm_code();
+}
+
+int Code::stack_slots() const {
+ DCHECK(has_safepoint_info());
return StackSlotsField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
}
-unsigned Code::safepoint_table_offset() const {
- DCHECK(is_turbofanned());
- return READ_UINT32_FIELD(this, kSafepointTableOffsetOffset);
+int Code::safepoint_table_offset() const {
+ DCHECK(has_safepoint_info());
+ return READ_INT32_FIELD(this, kSafepointTableOffsetOffset);
}
-void Code::set_safepoint_table_offset(unsigned offset) {
- CHECK(offset <= std::numeric_limits<uint32_t>::max());
- DCHECK(is_turbofanned() || offset == 0); // Allow zero initialization.
+void Code::set_safepoint_table_offset(int offset) {
+ CHECK_LE(0, offset);
+ DCHECK(has_safepoint_info() || offset == 0); // Allow zero initialization.
DCHECK(IsAligned(offset, static_cast<unsigned>(kIntSize)));
- WRITE_UINT32_FIELD(this, kSafepointTableOffsetOffset, offset);
+ WRITE_INT32_FIELD(this, kSafepointTableOffsetOffset, offset);
}
bool Code::marked_for_deoptimization() const {
@@ -635,6 +639,14 @@ ByteArray* BytecodeArray::SourcePositionTable() {
->source_position_table();
}
+void BytecodeArray::ClearFrameCacheFromSourcePositionTable() {
+ Object* maybe_table = source_position_table();
+ if (maybe_table->IsByteArray()) return;
+ DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
+ set_source_position_table(SourcePositionTableWithFrameCache::cast(maybe_table)
+ ->source_position_table());
+}
+
int BytecodeArray::BytecodeArraySize() { return SizeFor(this->length()); }
int BytecodeArray::SizeIncludingMetadata() {
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index bb447ce2dd..c43e07c1f9 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_CODE_H_
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -138,7 +139,8 @@ class Code : public HeapObject {
#endif // defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
#ifdef ENABLE_DISASSEMBLER
- void Disassemble(const char* name, std::ostream& os); // NOLINT
+ void Disassemble(const char* name, std::ostream& os,
+ void* current_pc = nullptr); // NOLINT
#endif
// [instruction_size]: Size of the native instructions
@@ -232,14 +234,16 @@ class Code : public HeapObject {
inline void set_builtin_index(int id);
inline bool is_builtin() const;
- // [stack_slots]: For kind OPTIMIZED_FUNCTION, the number of stack slots
+ inline bool has_safepoint_info() const;
+
+ // [stack_slots]: If {has_safepoint_info()}, the number of stack slots
// reserved in the code prologue.
- inline unsigned stack_slots() const;
+ inline int stack_slots() const;
- // [safepoint_table_start]: For kind OPTIMIZED_FUNCTION, the offset in
- // the instruction stream where the safepoint table starts.
- inline unsigned safepoint_table_offset() const;
- inline void set_safepoint_table_offset(unsigned offset);
+ // [safepoint_table_offset]: If {has_safepoint_info()}, the offset in the
+ // instruction stream where the safepoint table starts.
+ inline int safepoint_table_offset() const;
+ inline void set_safepoint_table_offset(int offset);
// [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether
// the code is going to be deoptimized because of dead embedded maps.
@@ -386,7 +390,7 @@ class Code : public HeapObject {
DECL_PRINTER(Code)
DECL_VERIFIER(Code)
- void PrintDeoptLocation(FILE* out, Address pc);
+ void PrintDeoptLocation(FILE* out, const char* str, Address pc);
bool CanDeoptAt(Address pc);
inline HandlerTable::CatchPrediction GetBuiltinCatchPrediction();
@@ -790,6 +794,7 @@ class BytecodeArray : public FixedArrayBase {
DECL_ACCESSORS(source_position_table, Object)
inline ByteArray* SourcePositionTable();
+ inline void ClearFrameCacheFromSourcePositionTable();
DECL_CAST(BytecodeArray)
diff --git a/deps/v8/src/objects/data-handler-inl.h b/deps/v8/src/objects/data-handler-inl.h
new file mode 100644
index 0000000000..40c3658e60
--- /dev/null
+++ b/deps/v8/src/objects/data-handler-inl.h
@@ -0,0 +1,41 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DATA_HANDLER_INL_H_
+#define V8_DATA_HANDLER_INL_H_
+
+#include "src/objects/data-handler.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+bool HeapObject::IsDataHandler() const {
+ return IsLoadHandler() || IsStoreHandler();
+}
+
+CAST_ACCESSOR(DataHandler)
+
+ACCESSORS(DataHandler, smi_handler, Object, kSmiHandlerOffset)
+ACCESSORS(DataHandler, validity_cell, Object, kValidityCellOffset)
+
+int DataHandler::data_field_count() const {
+ return (map()->instance_size() - kSizeWithData0) / kPointerSize;
+}
+
+ACCESSORS_CHECKED(DataHandler, data1, Object, kData1Offset,
+ map()->instance_size() >= kSizeWithData1)
+ACCESSORS_CHECKED(DataHandler, data2, Object, kData2Offset,
+ map()->instance_size() >= kSizeWithData2)
+ACCESSORS_CHECKED(DataHandler, data3, Object, kData3Offset,
+ map()->instance_size() >= kSizeWithData3)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_DATA_HANDLER_INL_H_
diff --git a/deps/v8/src/objects/data-handler.h b/deps/v8/src/objects/data-handler.h
new file mode 100644
index 0000000000..f11d00fa38
--- /dev/null
+++ b/deps/v8/src/objects/data-handler.h
@@ -0,0 +1,63 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DATA_HANDLER_H_
+#define V8_DATA_HANDLER_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// DataHandler is a base class for load and store handlers that can't be
+// encoded in one Smi. The kind of a handler can be deduced from its
+// instance type.
+class DataHandler : public Struct {
+ public:
+ // [smi_handler]: A Smi which encodes a handler or Code object (we still
+ // use code handlers for accessing lexical environment variables, but soon
+ // only smi handlers will remain). See LoadHandler and StoreHandler for
+ // details about encoding.
+ DECL_ACCESSORS(smi_handler, Object)
+
+ // [validity_cell]: A validity Cell that guards prototype chain modifications.
+ DECL_ACCESSORS(validity_cell, Object)
+
+ // Returns number of optional data fields available in the object.
+ inline int data_field_count() const;
+
+ // [data1-3]: These are optional general-purpose fields whose content and
+ // presence depends on the handler kind.
+ DECL_ACCESSORS(data1, Object)
+ DECL_ACCESSORS(data2, Object)
+ DECL_ACCESSORS(data3, Object)
+
+// Layout description.
+#define DATA_HANDLER_FIELDS(V) \
+ V(kSmiHandlerOffset, kPointerSize) \
+ V(kValidityCellOffset, kPointerSize) \
+ V(kSizeWithData0, 0) \
+ V(kData1Offset, kPointerSize) \
+ V(kSizeWithData1, 0) \
+ V(kData2Offset, kPointerSize) \
+ V(kSizeWithData2, 0) \
+ V(kData3Offset, kPointerSize) \
+ V(kSizeWithData3, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, DATA_HANDLER_FIELDS)
+#undef DATA_HANDLER_FIELDS
+
+ DECL_CAST(DataHandler)
+
+ DECL_VERIFIER(DataHandler)
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_DATA_HANDLER_H_
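The optional data1..data3 fields carry no separate count; data_field_count() (defined in data-handler-inl.h above) recovers how many are present from the map's instance size, since larger handlers are simply allocated with more slots. A minimal sketch of that arithmetic, using illustrative offsets that stand in for the real DEFINE_FIELD_OFFSET_CONSTANTS values:

#include <cassert>

// Assumed layout for illustration: pointer-size slots after a two-word header.
constexpr int kPointerSize = 8;
constexpr int kSizeWithData0 = 4 * kPointerSize;  // header + smi_handler + validity_cell
constexpr int kSizeWithData1 = kSizeWithData0 + kPointerSize;
constexpr int kSizeWithData3 = kSizeWithData0 + 3 * kPointerSize;

constexpr int DataFieldCount(int instance_size) {
  return (instance_size - kSizeWithData0) / kPointerSize;
}

int main() {
  assert(DataFieldCount(kSizeWithData0) == 0);
  assert(DataFieldCount(kSizeWithData1) == 1);
  assert(DataFieldCount(kSizeWithData3) == 3);
}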
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index 9ee2765897..0ce134b0b3 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_DEBUG_OBJECTS_H_
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index f0b985337b..a89a31fcd5 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_DESCRIPTOR_ARRAY_H_
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
new file mode 100644
index 0000000000..edca36c92e
--- /dev/null
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -0,0 +1,634 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_FIXED_ARRAY_INL_H_
+#define V8_OBJECTS_FIXED_ARRAY_INL_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
+TYPE_CHECKER(FixedArrayExact, FIXED_ARRAY_TYPE)
+TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
+TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
+
+CAST_ACCESSOR(ArrayList)
+CAST_ACCESSOR(ByteArray)
+CAST_ACCESSOR(FixedArray)
+CAST_ACCESSOR(FixedArrayBase)
+CAST_ACCESSOR(FixedDoubleArray)
+CAST_ACCESSOR(FixedTypedArrayBase)
+CAST_ACCESSOR(TemplateList)
+CAST_ACCESSOR(WeakFixedArray)
+
+SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
+SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
+
+Object* FixedArrayBase::unchecked_synchronized_length() const {
+ return ACQUIRE_READ_FIELD(this, kLengthOffset);
+}
+
+ACCESSORS(FixedTypedArrayBase, base_pointer, Object, kBasePointerOffset)
+
+Object** FixedArray::GetFirstElementAddress() {
+ return reinterpret_cast<Object**>(FIELD_ADDR(this, OffsetOfElementAt(0)));
+}
+
+bool FixedArray::ContainsOnlySmisOrHoles() {
+ Object* the_hole = GetHeap()->the_hole_value();
+ Object** current = GetFirstElementAddress();
+ for (int i = 0; i < length(); ++i) {
+ Object* candidate = *current++;
+ if (!candidate->IsSmi() && candidate != the_hole) return false;
+ }
+ return true;
+}
+
+Object* FixedArray::get(int index) const {
+ SLOW_DCHECK(index >= 0 && index < this->length());
+ return RELAXED_READ_FIELD(this, kHeaderSize + index * kPointerSize);
+}
+
+Handle<Object> FixedArray::get(FixedArray* array, int index, Isolate* isolate) {
+ return handle(array->get(index), isolate);
+}
+
+template <class T>
+MaybeHandle<T> FixedArray::GetValue(Isolate* isolate, int index) const {
+ Object* obj = get(index);
+ if (obj->IsUndefined(isolate)) return MaybeHandle<T>();
+ return Handle<T>(T::cast(obj), isolate);
+}
+
+template <class T>
+Handle<T> FixedArray::GetValueChecked(Isolate* isolate, int index) const {
+ Object* obj = get(index);
+ CHECK(!obj->IsUndefined(isolate));
+ return Handle<T>(T::cast(obj), isolate);
+}
+
+bool FixedArray::is_the_hole(Isolate* isolate, int index) {
+ return get(index)->IsTheHole(isolate);
+}
+
+void FixedArray::set(int index, Smi* value) {
+ DCHECK_NE(map(), GetHeap()->fixed_cow_array_map());
+ DCHECK_LT(index, this->length());
+ DCHECK(reinterpret_cast<Object*>(value)->IsSmi());
+ int offset = kHeaderSize + index * kPointerSize;
+ RELAXED_WRITE_FIELD(this, offset, value);
+}
+
+void FixedArray::set(int index, Object* value) {
+ DCHECK_NE(GetHeap()->fixed_cow_array_map(), map());
+ DCHECK(IsFixedArray());
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, this->length());
+ int offset = kHeaderSize + index * kPointerSize;
+ RELAXED_WRITE_FIELD(this, offset, value);
+ WRITE_BARRIER(GetHeap(), this, offset, value);
+}
+
+void FixedArray::set(int index, Object* value, WriteBarrierMode mode) {
+ DCHECK_NE(map(), GetHeap()->fixed_cow_array_map());
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, this->length());
+ int offset = kHeaderSize + index * kPointerSize;
+ RELAXED_WRITE_FIELD(this, offset, value);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
+}
+
+void FixedArray::NoWriteBarrierSet(FixedArray* array, int index,
+ Object* value) {
+ DCHECK_NE(array->map(), array->GetHeap()->fixed_cow_array_map());
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, array->length());
+ DCHECK(!array->GetHeap()->InNewSpace(value));
+ RELAXED_WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
+}
+
+void FixedArray::set_undefined(int index) {
+ set_undefined(GetIsolate(), index);
+}
+
+void FixedArray::set_undefined(Isolate* isolate, int index) {
+ FixedArray::NoWriteBarrierSet(this, index,
+ isolate->heap()->undefined_value());
+}
+
+void FixedArray::set_null(int index) { set_null(GetIsolate(), index); }
+
+void FixedArray::set_null(Isolate* isolate, int index) {
+ FixedArray::NoWriteBarrierSet(this, index, isolate->heap()->null_value());
+}
+
+void FixedArray::set_the_hole(int index) { set_the_hole(GetIsolate(), index); }
+
+void FixedArray::set_the_hole(Isolate* isolate, int index) {
+ FixedArray::NoWriteBarrierSet(this, index, isolate->heap()->the_hole_value());
+}
+
+void FixedArray::FillWithHoles(int from, int to) {
+ Isolate* isolate = GetIsolate();
+ for (int i = from; i < to; i++) {
+ set_the_hole(isolate, i);
+ }
+}
+
+Object** FixedArray::data_start() {
+ return HeapObject::RawField(this, kHeaderSize);
+}
+
+Object** FixedArray::RawFieldOfElementAt(int index) {
+ return HeapObject::RawField(this, OffsetOfElementAt(index));
+}
+
+double FixedDoubleArray::get_scalar(int index) {
+ DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
+ DCHECK(index >= 0 && index < this->length());
+ DCHECK(!is_the_hole(index));
+ return READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
+}
+
+uint64_t FixedDoubleArray::get_representation(int index) {
+ DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
+ DCHECK(index >= 0 && index < this->length());
+ int offset = kHeaderSize + index * kDoubleSize;
+ return READ_UINT64_FIELD(this, offset);
+}
+
+Handle<Object> FixedDoubleArray::get(FixedDoubleArray* array, int index,
+ Isolate* isolate) {
+ if (array->is_the_hole(index)) {
+ return isolate->factory()->the_hole_value();
+ } else {
+ return isolate->factory()->NewNumber(array->get_scalar(index));
+ }
+}
+
+void FixedDoubleArray::set(int index, double value) {
+ DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
+ int offset = kHeaderSize + index * kDoubleSize;
+ if (std::isnan(value)) {
+ WRITE_DOUBLE_FIELD(this, offset, std::numeric_limits<double>::quiet_NaN());
+ } else {
+ WRITE_DOUBLE_FIELD(this, offset, value);
+ }
+ DCHECK(!is_the_hole(index));
+}
+
+void FixedDoubleArray::set_the_hole(Isolate* isolate, int index) {
+ set_the_hole(index);
+}
+
+void FixedDoubleArray::set_the_hole(int index) {
+ DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
+ int offset = kHeaderSize + index * kDoubleSize;
+ WRITE_UINT64_FIELD(this, offset, kHoleNanInt64);
+}
+
+bool FixedDoubleArray::is_the_hole(Isolate* isolate, int index) {
+ return is_the_hole(index);
+}
+
+bool FixedDoubleArray::is_the_hole(int index) {
+ return get_representation(index) == kHoleNanInt64;
+}
+
+double* FixedDoubleArray::data_start() {
+ return reinterpret_cast<double*>(FIELD_ADDR(this, kHeaderSize));
+}
+
+void FixedDoubleArray::FillWithHoles(int from, int to) {
+ for (int i = from; i < to; i++) {
+ set_the_hole(i);
+ }
+}
+
+Object* WeakFixedArray::Get(int index) const {
+ Object* raw = FixedArray::cast(this)->get(index + kFirstIndex);
+ if (raw->IsSmi()) return raw;
+ DCHECK(raw->IsWeakCell());
+ return WeakCell::cast(raw)->value();
+}
+
+bool WeakFixedArray::IsEmptySlot(int index) const {
+ DCHECK(index < Length());
+ return Get(index)->IsSmi();
+}
+
+void WeakFixedArray::Clear(int index) {
+ FixedArray::cast(this)->set(index + kFirstIndex, Smi::kZero);
+}
+
+int WeakFixedArray::Length() const {
+ return FixedArray::cast(this)->length() - kFirstIndex;
+}
+
+int WeakFixedArray::last_used_index() const {
+ return Smi::ToInt(FixedArray::cast(this)->get(kLastUsedIndexIndex));
+}
+
+void WeakFixedArray::set_last_used_index(int index) {
+ FixedArray::cast(this)->set(kLastUsedIndexIndex, Smi::FromInt(index));
+}
+
+template <class T>
+T* WeakFixedArray::Iterator::Next() {
+ if (list_ != nullptr) {
+ // Assert that list did not change during iteration.
+ DCHECK_EQ(last_used_index_, list_->last_used_index());
+ while (index_ < list_->Length()) {
+ Object* item = list_->Get(index_++);
+ if (item != Empty()) return T::cast(item);
+ }
+ list_ = nullptr;
+ }
+ return nullptr;
+}
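The iterator above skips cleared slots (which hold Empty(), i.e. Smi::kZero) and returns nullptr once the list is exhausted. A toy analogue with plain pointers, where nullptr plays the role of the empty sentinel; the names are illustrative and not part of the V8 API:

#include <cassert>
#include <cstddef>
#include <vector>

// Walk a list whose cleared slots hold a sentinel and yield only live entries.
struct ToyIterator {
  const std::vector<int*>* list;
  std::size_t index = 0;
  int* Next() {
    if (list == nullptr) return nullptr;
    while (index < list->size()) {
      int* item = (*list)[index++];
      if (item != nullptr) return item;  // nullptr stands in for Empty()
    }
    list = nullptr;
    return nullptr;
  }
};

int main() {
  int a = 1, b = 2;
  std::vector<int*> slots = {&a, nullptr, &b};
  ToyIterator it{&slots};
  assert(*it.Next() == 1);
  assert(*it.Next() == 2);
  assert(it.Next() == nullptr);
}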
+
+int ArrayList::Length() const {
+ if (FixedArray::cast(this)->length() == 0) return 0;
+ return Smi::ToInt(FixedArray::cast(this)->get(kLengthIndex));
+}
+
+void ArrayList::SetLength(int length) {
+ return FixedArray::cast(this)->set(kLengthIndex, Smi::FromInt(length));
+}
+
+Object* ArrayList::Get(int index) const {
+ return FixedArray::cast(this)->get(kFirstIndex + index);
+}
+
+Object** ArrayList::Slot(int index) {
+ return data_start() + kFirstIndex + index;
+}
+
+void ArrayList::Set(int index, Object* obj, WriteBarrierMode mode) {
+ FixedArray::cast(this)->set(kFirstIndex + index, obj, mode);
+}
+
+void ArrayList::Clear(int index, Object* undefined) {
+ DCHECK(undefined->IsUndefined(GetIsolate()));
+ FixedArray::cast(this)->set(kFirstIndex + index, undefined,
+ SKIP_WRITE_BARRIER);
+}
+
+int ByteArray::Size() { return RoundUp(length() + kHeaderSize, kPointerSize); }
+
+byte ByteArray::get(int index) const {
+ DCHECK(index >= 0 && index < this->length());
+ return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
+}
+
+void ByteArray::set(int index, byte value) {
+ DCHECK(index >= 0 && index < this->length());
+ WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
+}
+
+void ByteArray::copy_in(int index, const byte* buffer, int length) {
+ DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
+ index + length <= this->length());
+ byte* dst_addr = FIELD_ADDR(this, kHeaderSize + index * kCharSize);
+ memcpy(dst_addr, buffer, length);
+}
+
+void ByteArray::copy_out(int index, byte* buffer, int length) {
+ DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
+ index + length <= this->length());
+ const byte* src_addr = FIELD_ADDR(this, kHeaderSize + index * kCharSize);
+ memcpy(buffer, src_addr, length);
+}
+
+int ByteArray::get_int(int index) const {
+ DCHECK(index >= 0 && index < this->length() / kIntSize);
+ return READ_INT_FIELD(this, kHeaderSize + index * kIntSize);
+}
+
+void ByteArray::set_int(int index, int value) {
+ DCHECK(index >= 0 && index < this->length() / kIntSize);
+ WRITE_INT_FIELD(this, kHeaderSize + index * kIntSize, value);
+}
+
+uint32_t ByteArray::get_uint32(int index) const {
+ DCHECK(index >= 0 && index < this->length() / kUInt32Size);
+ return READ_UINT32_FIELD(this, kHeaderSize + index * kUInt32Size);
+}
+
+void ByteArray::set_uint32(int index, uint32_t value) {
+ DCHECK(index >= 0 && index < this->length() / kUInt32Size);
+ WRITE_UINT32_FIELD(this, kHeaderSize + index * kUInt32Size, value);
+}
+
+void ByteArray::clear_padding() {
+ int data_size = length() + kHeaderSize;
+ memset(address() + data_size, 0, Size() - data_size);
+}
+
+ByteArray* ByteArray::FromDataStartAddress(Address address) {
+ DCHECK_TAG_ALIGNED(address);
+ return reinterpret_cast<ByteArray*>(address - kHeaderSize + kHeapObjectTag);
+}
+
+int ByteArray::DataSize() const { return RoundUp(length(), kPointerSize); }
+
+int ByteArray::ByteArraySize() { return SizeFor(this->length()); }
+
+Address ByteArray::GetDataStartAddress() {
+ return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
+}
+
+template <class T>
+PodArray<T>* PodArray<T>::cast(Object* object) {
+ SLOW_DCHECK(object->IsByteArray());
+ return reinterpret_cast<PodArray<T>*>(object);
+}
+template <class T>
+const PodArray<T>* PodArray<T>::cast(const Object* object) {
+ SLOW_DCHECK(object->IsByteArray());
+ return reinterpret_cast<const PodArray<T>*>(object);
+}
+
+// static
+template <class T>
+Handle<PodArray<T>> PodArray<T>::New(Isolate* isolate, int length,
+ PretenureFlag pretenure) {
+ return Handle<PodArray<T>>::cast(
+ isolate->factory()->NewByteArray(length * sizeof(T), pretenure));
+}
+
+void* FixedTypedArrayBase::external_pointer() const {
+ intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
+ return reinterpret_cast<void*>(ptr);
+}
+
+void FixedTypedArrayBase::set_external_pointer(void* value,
+ WriteBarrierMode mode) {
+ intptr_t ptr = reinterpret_cast<intptr_t>(value);
+ WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr);
+}
+
+void* FixedTypedArrayBase::DataPtr() {
+ return reinterpret_cast<void*>(
+ reinterpret_cast<intptr_t>(base_pointer()) +
+ reinterpret_cast<intptr_t>(external_pointer()));
+}
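DataPtr() simply adds base_pointer and external_pointer, which covers both cases described earlier: for on-heap arrays the base is the object itself and the external pointer is the offset to the data, while for off-heap arrays the base is null and the external pointer is the absolute backing-store address. A small sketch of that arithmetic with ordinary pointers:

#include <cassert>
#include <cstdint>

// The sum base + offset handles both the on-heap and the off-heap layout.
void* DataPtr(void* base_pointer, intptr_t external_pointer) {
  return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(base_pointer) +
                                 external_pointer);
}

int main() {
  // On-heap: base is the object, external_pointer is the data offset.
  alignas(8) unsigned char object[32];
  assert(DataPtr(object, 16) == object + 16);

  // Off-heap: base is null, external_pointer is the absolute address.
  static double backing[4];
  assert(DataPtr(nullptr, reinterpret_cast<intptr_t>(backing)) == backing);
}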
+
+int FixedTypedArrayBase::ElementSize(InstanceType type) {
+ int element_size;
+ switch (type) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ element_size = size; \
+ break;
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ default:
+ UNREACHABLE();
+ }
+ return element_size;
+}
+
+int FixedTypedArrayBase::DataSize(InstanceType type) const {
+ if (base_pointer() == Smi::kZero) return 0;
+ return length() * ElementSize(type);
+}
+
+int FixedTypedArrayBase::DataSize() const {
+ return DataSize(map()->instance_type());
+}
+
+size_t FixedTypedArrayBase::ByteLength() const {
+ return static_cast<size_t>(length()) *
+ static_cast<size_t>(ElementSize(map()->instance_type()));
+}
+
+int FixedTypedArrayBase::size() const {
+ return OBJECT_POINTER_ALIGN(kDataOffset + DataSize());
+}
+
+int FixedTypedArrayBase::TypedArraySize(InstanceType type) const {
+ return OBJECT_POINTER_ALIGN(kDataOffset + DataSize(type));
+}
+
+// static
+int FixedTypedArrayBase::TypedArraySize(InstanceType type, int length) {
+ return OBJECT_POINTER_ALIGN(kDataOffset + length * ElementSize(type));
+}
+
+uint8_t Uint8ArrayTraits::defaultValue() { return 0; }
+
+uint8_t Uint8ClampedArrayTraits::defaultValue() { return 0; }
+
+int8_t Int8ArrayTraits::defaultValue() { return 0; }
+
+uint16_t Uint16ArrayTraits::defaultValue() { return 0; }
+
+int16_t Int16ArrayTraits::defaultValue() { return 0; }
+
+uint32_t Uint32ArrayTraits::defaultValue() { return 0; }
+
+int32_t Int32ArrayTraits::defaultValue() { return 0; }
+
+float Float32ArrayTraits::defaultValue() {
+ return std::numeric_limits<float>::quiet_NaN();
+}
+
+double Float64ArrayTraits::defaultValue() {
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::get_scalar(int index) {
+ DCHECK((index >= 0) && (index < this->length()));
+ return FixedTypedArray<Traits>::get_scalar_from_data_ptr(DataPtr(), index);
+}
+
+// static
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::get_scalar_from_data_ptr(
+ void* data_ptr, int index) {
+ typename Traits::ElementType* ptr = reinterpret_cast<ElementType*>(data_ptr);
+ // The JavaScript memory model allows for racy reads and writes to a
+ // SharedArrayBuffer's backing store, which will always be a FixedTypedArray.
+ // ThreadSanitizer will catch these racy accesses and warn about them, so we
+ // disable TSAN for these reads and writes using annotations.
+ //
+ // We don't use relaxed atomics here, as it is not a requirement of the
+ // JavaScript memory model to have tear-free reads of overlapping accesses,
+ // and using relaxed atomics may introduce overhead.
+ TSAN_ANNOTATE_IGNORE_READS_BEGIN;
+ auto result = ptr[index];
+ TSAN_ANNOTATE_IGNORE_READS_END;
+ return result;
+}
+
+template <class Traits>
+void FixedTypedArray<Traits>::set(int index, ElementType value) {
+ CHECK((index >= 0) && (index < this->length()));
+ // See the comment in FixedTypedArray<Traits>::get_scalar.
+ auto* ptr = reinterpret_cast<ElementType*>(DataPtr());
+ TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+ ptr[index] = value;
+ TSAN_ANNOTATE_IGNORE_WRITES_END;
+}
+
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::from(int value) {
+ return static_cast<ElementType>(value);
+}
+
+template <>
+inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(int value) {
+ if (value < 0) return 0;
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(value);
+}
+
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::from(uint32_t value) {
+ return static_cast<ElementType>(value);
+}
+
+template <>
+inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(uint32_t value) {
+ // We need this special case for Uint32 -> Uint8Clamped, because the highest
+ // Uint32 values will be negative as an int, clamping to 0, rather than 255.
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(value);
+}
+
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::from(double value) {
+ return static_cast<ElementType>(DoubleToInt32(value));
+}
+
+template <>
+inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(double value) {
+ // Handle NaNs and less than zero values which clamp to zero.
+ if (!(value > 0)) return 0;
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(lrint(value));
+}
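Together, the Uint8Clamped specializations above implement the ToUint8Clamp conversion: NaN and negative inputs clamp to 0, values above 255 clamp to 255, and fractional doubles are rounded via lrint(). A standalone version of the double case, assuming the default round-to-nearest-even mode:

#include <cassert>
#include <cmath>
#include <cstdint>

// Mirrors the double -> Uint8Clamped conversion above: NaN and negatives clamp
// to 0, values above 255 clamp to 255, everything else rounds via lrint().
uint8_t ClampToUint8(double value) {
  if (!(value > 0)) return 0;  // also catches NaN
  if (value > 0xFF) return 0xFF;
  return static_cast<uint8_t>(std::lrint(value));
}

int main() {
  assert(ClampToUint8(-3.5) == 0);
  assert(ClampToUint8(300.0) == 255);
  assert(ClampToUint8(2.5) == 2);  // ties round to even under the default mode
}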
+
+template <>
+inline float FixedTypedArray<Float32ArrayTraits>::from(double value) {
+ return static_cast<float>(value);
+}
+
+template <>
+inline double FixedTypedArray<Float64ArrayTraits>::from(double value) {
+ return value;
+}
+
+template <class Traits>
+Handle<Object> FixedTypedArray<Traits>::get(FixedTypedArray<Traits>* array,
+ int index) {
+ return Traits::ToHandle(array->GetIsolate(), array->get_scalar(index));
+}
+
+template <class Traits>
+void FixedTypedArray<Traits>::SetValue(uint32_t index, Object* value) {
+ ElementType cast_value = Traits::defaultValue();
+ if (value->IsSmi()) {
+ int int_value = Smi::ToInt(value);
+ cast_value = from(int_value);
+ } else if (value->IsHeapNumber()) {
+ double double_value = HeapNumber::cast(value)->value();
+ cast_value = from(double_value);
+ } else {
+ // Clamp undefined to the default value. All other types have been
+ // converted to a number type further up in the call chain.
+ DCHECK(value->IsUndefined(GetIsolate()));
+ }
+ set(index, cast_value);
+}
+
+Handle<Object> Uint8ArrayTraits::ToHandle(Isolate* isolate, uint8_t scalar) {
+ return handle(Smi::FromInt(scalar), isolate);
+}
+
+Handle<Object> Uint8ClampedArrayTraits::ToHandle(Isolate* isolate,
+ uint8_t scalar) {
+ return handle(Smi::FromInt(scalar), isolate);
+}
+
+Handle<Object> Int8ArrayTraits::ToHandle(Isolate* isolate, int8_t scalar) {
+ return handle(Smi::FromInt(scalar), isolate);
+}
+
+Handle<Object> Uint16ArrayTraits::ToHandle(Isolate* isolate, uint16_t scalar) {
+ return handle(Smi::FromInt(scalar), isolate);
+}
+
+Handle<Object> Int16ArrayTraits::ToHandle(Isolate* isolate, int16_t scalar) {
+ return handle(Smi::FromInt(scalar), isolate);
+}
+
+Handle<Object> Uint32ArrayTraits::ToHandle(Isolate* isolate, uint32_t scalar) {
+ return isolate->factory()->NewNumberFromUint(scalar);
+}
+
+Handle<Object> Int32ArrayTraits::ToHandle(Isolate* isolate, int32_t scalar) {
+ return isolate->factory()->NewNumberFromInt(scalar);
+}
+
+Handle<Object> Float32ArrayTraits::ToHandle(Isolate* isolate, float scalar) {
+ return isolate->factory()->NewNumber(scalar);
+}
+
+Handle<Object> Float64ArrayTraits::ToHandle(Isolate* isolate, double scalar) {
+ return isolate->factory()->NewNumber(scalar);
+}
+
+// static
+template <class Traits>
+STATIC_CONST_MEMBER_DEFINITION const InstanceType
+ FixedTypedArray<Traits>::kInstanceType;
+
+template <class Traits>
+FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(Object* object) {
+ SLOW_DCHECK(object->IsHeapObject() &&
+ HeapObject::cast(object)->map()->instance_type() ==
+ Traits::kInstanceType);
+ return reinterpret_cast<FixedTypedArray<Traits>*>(object);
+}
+
+template <class Traits>
+const FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(
+ const Object* object) {
+ SLOW_DCHECK(object->IsHeapObject() &&
+ HeapObject::cast(object)->map()->instance_type() ==
+ Traits::kInstanceType);
+ return reinterpret_cast<FixedTypedArray<Traits>*>(object);
+}
+
+int TemplateList::length() const {
+ return Smi::ToInt(FixedArray::cast(this)->get(kLengthIndex));
+}
+
+Object* TemplateList::get(int index) const {
+ return FixedArray::cast(this)->get(kFirstElementIndex + index);
+}
+
+void TemplateList::set(int index, Object* value) {
+ FixedArray::cast(this)->set(kFirstElementIndex + index, value);
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_FIXED_ARRAY_INL_H_
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
new file mode 100644
index 0000000000..5d78af8799
--- /dev/null
+++ b/deps/v8/src/objects/fixed-array.h
@@ -0,0 +1,601 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_FIXED_ARRAY_H_
+#define V8_OBJECTS_FIXED_ARRAY_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V) \
+ V(BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE) \
+ V(BYTECODE_ARRAY_HANDLER_TABLE_SUB_TYPE) \
+ V(CODE_STUBS_TABLE_SUB_TYPE) \
+ V(COMPILATION_CACHE_TABLE_SUB_TYPE) \
+ V(CONTEXT_SUB_TYPE) \
+ V(COPY_ON_WRITE_SUB_TYPE) \
+ V(DEOPTIMIZATION_DATA_SUB_TYPE) \
+ V(DESCRIPTOR_ARRAY_SUB_TYPE) \
+ V(EMBEDDED_OBJECT_SUB_TYPE) \
+ V(ENUM_CACHE_SUB_TYPE) \
+ V(ENUM_INDICES_CACHE_SUB_TYPE) \
+ V(DEPENDENT_CODE_SUB_TYPE) \
+ V(DICTIONARY_ELEMENTS_SUB_TYPE) \
+ V(DICTIONARY_PROPERTIES_SUB_TYPE) \
+ V(EMPTY_PROPERTIES_DICTIONARY_SUB_TYPE) \
+ V(PACKED_ELEMENTS_SUB_TYPE) \
+ V(FAST_PROPERTIES_SUB_TYPE) \
+ V(FAST_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE) \
+ V(HANDLER_TABLE_SUB_TYPE) \
+ V(JS_COLLECTION_SUB_TYPE) \
+ V(JS_WEAK_COLLECTION_SUB_TYPE) \
+ V(NOSCRIPT_SHARED_FUNCTION_INFOS_SUB_TYPE) \
+ V(NUMBER_STRING_CACHE_SUB_TYPE) \
+ V(OBJECT_TO_CODE_SUB_TYPE) \
+ V(OPTIMIZED_CODE_LITERALS_SUB_TYPE) \
+ V(OPTIMIZED_CODE_MAP_SUB_TYPE) \
+ V(PROTOTYPE_USERS_SUB_TYPE) \
+ V(REGEXP_MULTIPLE_CACHE_SUB_TYPE) \
+ V(RETAINED_MAPS_SUB_TYPE) \
+ V(SCOPE_INFO_SUB_TYPE) \
+ V(SCRIPT_LIST_SUB_TYPE) \
+ V(SERIALIZED_OBJECTS_SUB_TYPE) \
+ V(SHARED_FUNCTION_INFOS_SUB_TYPE) \
+ V(SINGLE_CHARACTER_STRING_CACHE_SUB_TYPE) \
+ V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE) \
+ V(STRING_SPLIT_CACHE_SUB_TYPE) \
+ V(STRING_TABLE_SUB_TYPE) \
+ V(TEMPLATE_INFO_SUB_TYPE) \
+ V(FEEDBACK_METADATA_SUB_TYPE) \
+ V(WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE)
+
+enum FixedArraySubInstanceType {
+#define DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE(name) name,
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE)
+#undef DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE
+ LAST_FIXED_ARRAY_SUB_TYPE = WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE
+};
+
+// Common superclass for FixedArrays that allows implementations to share
+// common accessors and some code paths.
+class FixedArrayBase : public HeapObject {
+ public:
+ // [length]: length of the array.
+ inline int length() const;
+ inline void set_length(int value);
+
+ // Get and set the length using acquire loads and release stores.
+ inline int synchronized_length() const;
+ inline void synchronized_set_length(int value);
+
+ inline Object* unchecked_synchronized_length() const;
+
+ DECL_CAST(FixedArrayBase)
+
+ static int GetMaxLengthForNewSpaceAllocation(ElementsKind kind);
+
+ bool IsCowArray() const;
+
+ // Layout description.
+ // Length is smi tagged when it is stored.
+ static const int kLengthOffset = HeapObject::kHeaderSize;
+ static const int kHeaderSize = kLengthOffset + kPointerSize;
+};
+
+// FixedArray describes fixed-sized arrays with element type Object*.
+class FixedArray : public FixedArrayBase {
+ public:
+ // Setter and getter for elements.
+ inline Object* get(int index) const;
+ static inline Handle<Object> get(FixedArray* array, int index,
+ Isolate* isolate);
+ template <class T>
+ MaybeHandle<T> GetValue(Isolate* isolate, int index) const;
+
+ template <class T>
+ Handle<T> GetValueChecked(Isolate* isolate, int index) const;
+
+ // Return a grown copy if the index is bigger than the array's length.
+ static Handle<FixedArray> SetAndGrow(Handle<FixedArray> array, int index,
+ Handle<Object> value);
+
+ // Setter that uses write barrier.
+ inline void set(int index, Object* value);
+ inline bool is_the_hole(Isolate* isolate, int index);
+
+ // Setter that doesn't need write barrier.
+ inline void set(int index, Smi* value);
+ // Setter with explicit barrier mode.
+ inline void set(int index, Object* value, WriteBarrierMode mode);
+
+ // Setters for frequently used oddballs located in old space.
+ inline void set_undefined(int index);
+ inline void set_undefined(Isolate* isolate, int index);
+ inline void set_null(int index);
+ inline void set_null(Isolate* isolate, int index);
+ inline void set_the_hole(int index);
+ inline void set_the_hole(Isolate* isolate, int index);
+
+ inline Object** GetFirstElementAddress();
+ inline bool ContainsOnlySmisOrHoles();
+
+ // Gives access to raw memory which stores the array's data.
+ inline Object** data_start();
+
+ inline void FillWithHoles(int from, int to);
+
+ // Shrink length and insert filler objects.
+ void Shrink(int length);
+
+ // Copy a sub array from the receiver to dest.
+ void CopyTo(int pos, FixedArray* dest, int dest_pos, int len) const;
+
+ // Garbage collection support.
+ static constexpr int SizeFor(int length) {
+ return kHeaderSize + length * kPointerSize;
+ }
+
+ // Code Generation support.
+ static constexpr int OffsetOfElementAt(int index) { return SizeFor(index); }
+
+ // Garbage collection support.
+ inline Object** RawFieldOfElementAt(int index);
+
+ DECL_CAST(FixedArray)
+
+ // Maximal allowed size, in bytes, of a single FixedArray.
+ // Prevents overflowing size computations, as well as extreme memory
+ // consumption.
+ static const int kMaxSize = 128 * MB * kPointerSize;
+ // Maximally allowed length of a FixedArray.
+ static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
+ // Maximally allowed length for a regular (non large object space) object.
+ STATIC_ASSERT(kMaxRegularHeapObjectSize < kMaxSize);
+ static const int kMaxRegularLength =
+ (kMaxRegularHeapObjectSize - kHeaderSize) / kPointerSize;
+
+ // Dispatched behavior.
+ DECL_PRINTER(FixedArray)
+ DECL_VERIFIER(FixedArray)
+#ifdef DEBUG
+ // Checks if two FixedArrays have identical contents.
+ bool IsEqualTo(FixedArray* other);
+#endif
+
+ typedef FlexibleBodyDescriptor<kHeaderSize> BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ protected:
+ // Set operation on FixedArray without using write barriers. Can
+ // only be used for storing old space objects or smis.
+ static inline void NoWriteBarrierSet(FixedArray* array, int index,
+ Object* value);
+
+ private:
+ STATIC_ASSERT(kHeaderSize == Internals::kFixedArrayHeaderSize);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
+};
+
+// FixedArray alias added only because of IsFixedArrayExact() predicate, which
+// checks for the exact instance type FIXED_ARRAY_TYPE instead of a range
+// check: [FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE].
+class FixedArrayExact final : public FixedArray {
+ public:
+ DECL_CAST(FixedArrayExact)
+};
+
+// FixedDoubleArray describes fixed-sized arrays with element type double.
+class FixedDoubleArray : public FixedArrayBase {
+ public:
+ // Setter and getter for elements.
+ inline double get_scalar(int index);
+ inline uint64_t get_representation(int index);
+ static inline Handle<Object> get(FixedDoubleArray* array, int index,
+ Isolate* isolate);
+ inline void set(int index, double value);
+ inline void set_the_hole(Isolate* isolate, int index);
+ inline void set_the_hole(int index);
+
+ // Checking for the hole.
+ inline bool is_the_hole(Isolate* isolate, int index);
+ inline bool is_the_hole(int index);
+
+ // Garbage collection support.
+ inline static int SizeFor(int length) {
+ return kHeaderSize + length * kDoubleSize;
+ }
+
+ // Gives access to raw memory which stores the array's data.
+ inline double* data_start();
+
+ inline void FillWithHoles(int from, int to);
+
+ // Code Generation support.
+ static int OffsetOfElementAt(int index) { return SizeFor(index); }
+
+ DECL_CAST(FixedDoubleArray)
+
+ // Maximal allowed size, in bytes, of a single FixedDoubleArray.
+ // Prevents overflowing size computations, as well as extreme memory
+ // consumption.
+ static const int kMaxSize = 512 * MB;
+ // Maximally allowed length of a FixedDoubleArray.
+ static const int kMaxLength = (kMaxSize - kHeaderSize) / kDoubleSize;
+
+ // Dispatched behavior.
+ DECL_PRINTER(FixedDoubleArray)
+ DECL_VERIFIER(FixedDoubleArray)
+
+ class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FixedDoubleArray);
+};
+
+class WeakFixedArray : public FixedArray {
+ public:
+ // If |maybe_array| is not a WeakFixedArray, a fresh one will be allocated.
+ // This function does not check if the value exists already; callers must
+ // ensure this themselves if necessary.
+ static Handle<WeakFixedArray> Add(Handle<Object> maybe_array,
+ Handle<HeapObject> value,
+ int* assigned_index = nullptr);
+
+ // Returns true if an entry was found and removed.
+ bool Remove(Handle<HeapObject> value);
+
+ class NullCallback {
+ public:
+ static void Callback(Object* value, int old_index, int new_index) {}
+ };
+
+ template <class CompactionCallback>
+ void Compact();
+
+ inline Object* Get(int index) const;
+ inline void Clear(int index);
+ inline int Length() const;
+
+ inline bool IsEmptySlot(int index) const;
+ static Object* Empty() { return Smi::kZero; }
+
+ class Iterator {
+ public:
+ explicit Iterator(Object* maybe_array) : list_(nullptr) {
+ Reset(maybe_array);
+ }
+ void Reset(Object* maybe_array);
+
+ template <class T>
+ inline T* Next();
+
+ private:
+ int index_;
+ WeakFixedArray* list_;
+#ifdef DEBUG
+ int last_used_index_;
+ DisallowHeapAllocation no_gc_;
+#endif // DEBUG
+ DISALLOW_COPY_AND_ASSIGN(Iterator);
+ };
+
+ DECL_CAST(WeakFixedArray)
+
+ private:
+ static const int kLastUsedIndexIndex = 0;
+ static const int kFirstIndex = 1;
+
+ static Handle<WeakFixedArray> Allocate(
+ Isolate* isolate, int size, Handle<WeakFixedArray> initialize_from);
+
+ static void Set(Handle<WeakFixedArray> array, int index,
+ Handle<HeapObject> value);
+ inline void clear(int index);
+
+ inline int last_used_index() const;
+ inline void set_last_used_index(int index);
+
+ // Disallow inherited setters.
+ void set(int index, Smi* value);
+ void set(int index, Object* value);
+ void set(int index, Object* value, WriteBarrierMode mode);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(WeakFixedArray);
+};
+
+// A generic array that grows dynamically with O(1) amortized insertion.
+//
+// ArrayList is a FixedArray with static convenience methods for adding more
+// elements. The Length() method returns the number of elements in the list, not
+// the allocated size. The number of elements is stored at kLengthIndex and is
+// updated with every insertion. The elements of the ArrayList are stored in the
+// underlying FixedArray starting at kFirstIndex.
+class ArrayList : public FixedArray {
+ public:
+ enum AddMode {
+ kNone,
+ // Use this if GC can delete elements from the array.
+ kReloadLengthAfterAllocation,
+ };
+ static Handle<ArrayList> Add(Handle<ArrayList> array, Handle<Object> obj,
+ AddMode mode = kNone);
+ static Handle<ArrayList> Add(Handle<ArrayList> array, Handle<Object> obj1,
+ Handle<Object> obj2, AddMode = kNone);
+ static Handle<ArrayList> New(Isolate* isolate, int size);
+
+ // Returns the number of elements in the list, not the allocated size, which
+ // is length(). Lowercase length() and uppercase Length() return different
+ // results!
+ inline int Length() const;
+
+ // Sets the Length() as used by Elements(). Does not change the underlying
+ // storage capacity, i.e., length().
+ inline void SetLength(int length);
+ inline Object* Get(int index) const;
+ inline Object** Slot(int index);
+
+ // Set the element at index to obj. The underlying array must be large enough.
+ // If you need to grow the ArrayList, use the static Add() methods instead.
+ inline void Set(int index, Object* obj,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+ // Set the element at index to undefined. This does not change the Length().
+ inline void Clear(int index, Object* undefined);
+
+ // Return a copy of the list of size Length() without the first entry. The
+ // number returned by Length() is stored in the first entry.
+ static Handle<FixedArray> Elements(Handle<ArrayList> array);
+ bool IsFull();
+ DECL_CAST(ArrayList)
+
+ private:
+ static Handle<ArrayList> EnsureSpace(Handle<ArrayList> array, int length);
+ static const int kLengthIndex = 0;
+ static const int kFirstIndex = 1;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ArrayList);
+};
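
A minimal usage sketch of the ArrayList API declared above (illustrative only; assumes an existing Isolate* named isolate and the usual handle() helper):

  // Grow an ArrayList and read it back; Add() reallocates as needed.
  Handle<ArrayList> list = ArrayList::New(isolate, 2);
  list = ArrayList::Add(list, handle(Smi::FromInt(1), isolate));
  list = ArrayList::Add(list, handle(Smi::FromInt(2), isolate));
  DCHECK_EQ(2, list->Length());  // Number of elements, not the allocated size.
  Object* first = list->Get(0);  // Smi 1.
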
+
+enum SearchMode { ALL_ENTRIES, VALID_ENTRIES };
+
+template <SearchMode search_mode, typename T>
+inline int Search(T* array, Name* name, int valid_entries = 0,
+ int* out_insertion_index = nullptr);
+
+// ByteArray represents fixed-size byte arrays. Used for the relocation info
+// that is attached to code objects.
+class ByteArray : public FixedArrayBase {
+ public:
+ inline int Size();
+
+ // Setter and getter.
+ inline byte get(int index) const;
+ inline void set(int index, byte value);
+
+ // Copy in / copy out whole byte slices.
+ inline void copy_out(int index, byte* buffer, int length);
+ inline void copy_in(int index, const byte* buffer, int length);
+
+ // Treat contents as an int array.
+ inline int get_int(int index) const;
+ inline void set_int(int index, int value);
+
+ inline uint32_t get_uint32(int index) const;
+ inline void set_uint32(int index, uint32_t value);
+
+ // Clear uninitialized padding space. This ensures that the snapshot content
+ // is deterministic.
+ inline void clear_padding();
+
+ static int SizeFor(int length) {
+ return OBJECT_POINTER_ALIGN(kHeaderSize + length);
+ }
+ // We use byte arrays for free blocks in the heap. Given a desired size in
+ // bytes that is a multiple of the word size and big enough to hold a byte
+ // array, this function returns the number of elements a byte array should
+ // have.
+ static int LengthFor(int size_in_bytes) {
+ DCHECK(IsAligned(size_in_bytes, kPointerSize));
+ DCHECK_GE(size_in_bytes, kHeaderSize);
+ return size_in_bytes - kHeaderSize;
+ }
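  // Worked example (illustrative, not part of the original patch; assumes a
  // 64-bit layout where kHeaderSize is 16): a 64-byte free block yields
  // LengthFor(64) == 48 elements, and conversely
  // SizeFor(48) == OBJECT_POINTER_ALIGN(16 + 48) == 64.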
+
+ // Returns data start address.
+ inline Address GetDataStartAddress();
+
+ inline int DataSize() const;
+
+ // Returns a pointer to the ByteArray object for a given data start address.
+ static inline ByteArray* FromDataStartAddress(Address address);
+
+ DECL_CAST(ByteArray)
+
+ // Dispatched behavior.
+ inline int ByteArraySize();
+ DECL_PRINTER(ByteArray)
+ DECL_VERIFIER(ByteArray)
+
+ // Layout description.
+ static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
+
+ // Maximal memory consumption for a single ByteArray.
+ static const int kMaxSize = 512 * MB;
+ // Maximal length of a single ByteArray.
+ static const int kMaxLength = kMaxSize - kHeaderSize;
+
+ class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
+};
+
+// Wrapper class for ByteArray which can store arbitrary C++ classes, as long
+// as they can be copied with memcpy.
+template <class T>
+class PodArray : public ByteArray {
+ public:
+ static Handle<PodArray<T>> New(Isolate* isolate, int length,
+ PretenureFlag pretenure = NOT_TENURED);
+ void copy_out(int index, T* result) {
+ ByteArray::copy_out(index * sizeof(T), reinterpret_cast<byte*>(result),
+ sizeof(T));
+ }
+ T get(int index) {
+ T result;
+ copy_out(index, &result);
+ return result;
+ }
+ void set(int index, const T& value) {
+ copy_in(index * sizeof(T), reinterpret_cast<const byte*>(&value),
+ sizeof(T));
+ }
+ int length() { return ByteArray::length() / sizeof(T); }
+ DECL_CAST(PodArray<T>)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PodArray<T>);
+};
+
+// V has parameters (Type, type, TYPE, C type, element_size)
+#define TYPED_ARRAYS(V) \
+ V(Uint8, uint8, UINT8, uint8_t, 1) \
+ V(Int8, int8, INT8, int8_t, 1) \
+ V(Uint16, uint16, UINT16, uint16_t, 2) \
+ V(Int16, int16, INT16, int16_t, 2) \
+ V(Uint32, uint32, UINT32, uint32_t, 4) \
+ V(Int32, int32, INT32, int32_t, 4) \
+ V(Float32, float32, FLOAT32, float, 4) \
+ V(Float64, float64, FLOAT64, double, 8) \
+ V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t, 1)
+
+class FixedTypedArrayBase : public FixedArrayBase {
+ public:
+ // [base_pointer]: Either points to the FixedTypedArrayBase itself or nullptr.
+ DECL_ACCESSORS(base_pointer, Object)
+
+ // [external_pointer]: Contains the offset between base_pointer and the start
+ // of the data. If base_pointer is nullptr, external_pointer therefore points
+ // to the actual backing store.
+ DECL_ACCESSORS(external_pointer, void)
+
+ // Dispatched behavior.
+ DECL_CAST(FixedTypedArrayBase)
+
+ static const int kBasePointerOffset = FixedArrayBase::kHeaderSize;
+ static const int kExternalPointerOffset = kBasePointerOffset + kPointerSize;
+ static const int kHeaderSize =
+ DOUBLE_POINTER_ALIGN(kExternalPointerOffset + kPointerSize);
+
+ static const int kDataOffset = kHeaderSize;
+
+ static const int kMaxElementSize = 8;
+
+#ifdef V8_HOST_ARCH_32_BIT
+ static const size_t kMaxByteLength = std::numeric_limits<size_t>::max();
+#else
+ static const size_t kMaxByteLength =
+ static_cast<size_t>(Smi::kMaxValue) * kMaxElementSize;
+#endif // V8_HOST_ARCH_32_BIT
+
+ static const size_t kMaxLength = Smi::kMaxValue;
+
+ class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ inline int size() const;
+
+ static inline int TypedArraySize(InstanceType type, int length);
+ inline int TypedArraySize(InstanceType type) const;
+
+ // Use with care: returns raw pointer into heap.
+ inline void* DataPtr();
+
+ inline int DataSize() const;
+
+ inline size_t ByteLength() const;
+
+ private:
+ static inline int ElementSize(InstanceType type);
+
+ inline int DataSize(InstanceType type) const;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArrayBase);
+};
+
+template <class Traits>
+class FixedTypedArray : public FixedTypedArrayBase {
+ public:
+ typedef typename Traits::ElementType ElementType;
+ static const InstanceType kInstanceType = Traits::kInstanceType;
+
+ DECL_CAST(FixedTypedArray<Traits>)
+
+ static inline ElementType get_scalar_from_data_ptr(void* data_ptr, int index);
+ inline ElementType get_scalar(int index);
+ static inline Handle<Object> get(FixedTypedArray* array, int index);
+ inline void set(int index, ElementType value);
+
+ static inline ElementType from(int value);
+ static inline ElementType from(uint32_t value);
+ static inline ElementType from(double value);
+
+ // This accessor applies the correct conversion from Smi, HeapNumber
+ // and undefined.
+ inline void SetValue(uint32_t index, Object* value);
+
+ DECL_PRINTER(FixedTypedArray)
+ DECL_VERIFIER(FixedTypedArray)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArray);
+};
+
+#define FIXED_TYPED_ARRAY_TRAITS(Type, type, TYPE, elementType, size) \
+ STATIC_ASSERT(size <= FixedTypedArrayBase::kMaxElementSize); \
+ class Type##ArrayTraits { \
+ public: /* NOLINT */ \
+ typedef elementType ElementType; \
+ static const InstanceType kInstanceType = FIXED_##TYPE##_ARRAY_TYPE; \
+ static const char* Designator() { return #type " array"; } \
+ static inline Handle<Object> ToHandle(Isolate* isolate, \
+ elementType scalar); \
+ static inline elementType defaultValue(); \
+ }; \
+ \
+ typedef FixedTypedArray<Type##ArrayTraits> Fixed##Type##Array;
+
+TYPED_ARRAYS(FIXED_TYPED_ARRAY_TRAITS)
+
+#undef FIXED_TYPED_ARRAY_TRAITS
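
For readability, the macro pair above expands, for the Uint8 row of TYPED_ARRAYS, into roughly the following (illustrative expansion, whitespace added):

  STATIC_ASSERT(1 <= FixedTypedArrayBase::kMaxElementSize);
  class Uint8ArrayTraits {
   public:
    typedef uint8_t ElementType;
    static const InstanceType kInstanceType = FIXED_UINT8_ARRAY_TYPE;
    static const char* Designator() { return "uint8 array"; }
    static inline Handle<Object> ToHandle(Isolate* isolate, uint8_t scalar);
    static inline uint8_t defaultValue();
  };
  typedef FixedTypedArray<Uint8ArrayTraits> FixedUint8Array;
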
+
+class TemplateList : public FixedArray {
+ public:
+ static Handle<TemplateList> New(Isolate* isolate, int size);
+ inline int length() const;
+ inline Object* get(int index) const;
+ inline void set(int index, Object* value);
+ static Handle<TemplateList> Add(Isolate* isolate, Handle<TemplateList> list,
+ Handle<Object> value);
+ DECL_CAST(TemplateList)
+ private:
+ static const int kLengthIndex = 0;
+ static const int kFirstElementIndex = kLengthIndex + 1;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateList);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_FIXED_ARRAY_H_
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
index baff7c03b4..a764684a00 100644
--- a/deps/v8/src/objects/hash-table-inl.h
+++ b/deps/v8/src/objects/hash-table-inl.h
@@ -11,12 +11,116 @@
namespace v8 {
namespace internal {
+int HashTableBase::NumberOfElements() const {
+ return Smi::ToInt(get(kNumberOfElementsIndex));
+}
+
+int HashTableBase::NumberOfDeletedElements() const {
+ return Smi::ToInt(get(kNumberOfDeletedElementsIndex));
+}
+
+int HashTableBase::Capacity() const { return Smi::ToInt(get(kCapacityIndex)); }
+
+void HashTableBase::ElementAdded() {
+ SetNumberOfElements(NumberOfElements() + 1);
+}
+
+void HashTableBase::ElementRemoved() {
+ SetNumberOfElements(NumberOfElements() - 1);
+ SetNumberOfDeletedElements(NumberOfDeletedElements() + 1);
+}
+
+void HashTableBase::ElementsRemoved(int n) {
+ SetNumberOfElements(NumberOfElements() - n);
+ SetNumberOfDeletedElements(NumberOfDeletedElements() + n);
+}
+
+// static
+int HashTableBase::ComputeCapacity(int at_least_space_for) {
+ // Add 50% slack to make slot collisions sufficiently unlikely.
+ // See matching computation in HashTable::HasSufficientCapacityToAdd().
+ // Must be kept in sync with CodeStubAssembler::HashTableComputeCapacity().
+ int raw_cap = at_least_space_for + (at_least_space_for >> 1);
+ int capacity = base::bits::RoundUpToPowerOfTwo32(raw_cap);
+ return Max(capacity, kMinCapacity);
+}
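  // Worked example (illustrative): at_least_space_for == 5 gives
  // raw_cap == 5 + 2 == 7, which rounds up to a capacity of 8; 11 gives
  // raw_cap == 16 and thus capacity 16. Smaller requests are clamped up to
  // kMinCapacity by the Max().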
+
+void HashTableBase::SetNumberOfElements(int nof) {
+ set(kNumberOfElementsIndex, Smi::FromInt(nof));
+}
+
+void HashTableBase::SetNumberOfDeletedElements(int nod) {
+ set(kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
+}
+
+template <typename Key>
+int BaseShape<Key>::GetMapRootIndex() {
+ return Heap::kHashTableMapRootIndex;
+}
+
+template <typename Derived, typename Shape>
+int HashTable<Derived, Shape>::FindEntry(Key key) {
+ return FindEntry(GetIsolate(), key);
+}
+
+template <typename Derived, typename Shape>
+int HashTable<Derived, Shape>::FindEntry(Isolate* isolate, Key key) {
+ return FindEntry(isolate, key, Shape::Hash(isolate, key));
+}
+
+// Find entry for key otherwise return kNotFound.
+template <typename Derived, typename Shape>
+int HashTable<Derived, Shape>::FindEntry(Isolate* isolate, Key key,
+ int32_t hash) {
+ uint32_t capacity = Capacity();
+ uint32_t entry = FirstProbe(hash, capacity);
+ uint32_t count = 1;
+ // EnsureCapacity will guarantee the hash table is never full.
+ Object* undefined = isolate->heap()->undefined_value();
+ Object* the_hole = isolate->heap()->the_hole_value();
+ USE(the_hole);
+ while (true) {
+ Object* element = KeyAt(entry);
+ // Empty entry. Uses raw unchecked accessors because it is called by the
+ // string table during bootstrapping.
+ if (element == undefined) break;
+ if (!(Shape::kNeedsHoleCheck && the_hole == element)) {
+ if (Shape::IsMatch(key, element)) return entry;
+ }
+ entry = NextProbe(entry, count++, capacity);
+ }
+ return kNotFound;
+}
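  // Note (illustrative): this is plain open addressing. The loop walks the
  // probe sequence produced by FirstProbe/NextProbe (declared elsewhere in
  // HashTable and assumed here to mask into the power-of-two capacity) until
  // it either matches the key or reaches an undefined sentinel; EnsureCapacity
  // keeps the table from ever being completely full, so the sentinel is always
  // reachable.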
+
template <typename KeyT>
bool BaseShape<KeyT>::IsLive(Isolate* isolate, Object* k) {
Heap* heap = isolate->heap();
return k != heap->the_hole_value() && k != heap->undefined_value();
}
+template <typename Derived, typename Shape>
+HashTable<Derived, Shape>* HashTable<Derived, Shape>::cast(Object* obj) {
+ SLOW_DCHECK(obj->IsHashTable());
+ return reinterpret_cast<HashTable*>(obj);
+}
+
+template <typename Derived, typename Shape>
+const HashTable<Derived, Shape>* HashTable<Derived, Shape>::cast(
+ const Object* obj) {
+ SLOW_DCHECK(obj->IsHashTable());
+ return reinterpret_cast<const HashTable*>(obj);
+}
+
+bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key, int32_t hash) {
+ return FindEntry(isolate, key, hash) != kNotFound;
+}
+
+bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key) {
+ Object* hash = key->GetHash();
+ if (!hash->IsSmi()) return false;
+ return FindEntry(isolate, key, Smi::ToInt(hash)) != kNotFound;
+}
+
int OrderedHashSet::GetMapRootIndex() {
return Heap::kOrderedHashSetMapRootIndex;
}
@@ -25,6 +129,11 @@ int OrderedHashMap::GetMapRootIndex() {
return Heap::kOrderedHashMapMapRootIndex;
}
+inline Object* OrderedHashMap::ValueAt(int entry) {
+ DCHECK_LT(entry, this->UsedCapacity());
+ return get(EntryToIndex(entry) + kValueOffset);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index 9b7ac5deb3..a058b7df39 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -5,10 +5,9 @@
#ifndef V8_OBJECTS_HASH_TABLE_H_
#define V8_OBJECTS_HASH_TABLE_H_
-#include "src/objects.h"
-
#include "src/base/compiler-specific.h"
#include "src/globals.h"
+#include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -896,37 +895,6 @@ class OrderedHashTableIterator : public JSCollectionIterator {
DISALLOW_IMPLICIT_CONSTRUCTORS(OrderedHashTableIterator);
};
-
-class JSSetIterator
- : public OrderedHashTableIterator<JSSetIterator, OrderedHashSet> {
- public:
- // Dispatched behavior.
- DECL_PRINTER(JSSetIterator)
- DECL_VERIFIER(JSSetIterator)
-
- DECL_CAST(JSSetIterator)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSSetIterator);
-};
-
-class JSMapIterator
- : public OrderedHashTableIterator<JSMapIterator, OrderedHashMap> {
- public:
- // Dispatched behavior.
- DECL_PRINTER(JSMapIterator)
- DECL_VERIFIER(JSMapIterator)
-
- DECL_CAST(JSMapIterator)
-
- // Returns the current value of the iterator. This should only be called when
- // |HasMore| returns true.
- inline Object* CurrentValue();
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSMapIterator);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h
index 6bba2f0054..1128e190b2 100644
--- a/deps/v8/src/objects/js-array-inl.h
+++ b/deps/v8/src/objects/js-array-inl.h
@@ -204,6 +204,15 @@ void JSTypedArray::set_length(Object* value, WriteBarrierMode mode) {
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kLengthOffset, value, mode);
}
+bool JSTypedArray::HasJSTypedArrayPrototype(Isolate* isolate) {
+ DisallowHeapAllocation no_gc;
+ Object* proto = map()->prototype();
+ if (!proto->IsJSObject()) return false;
+
+ JSObject* proto_obj = JSObject::cast(proto);
+ return proto_obj->map()->prototype() == *isolate->typed_array_prototype();
+}
+
// static
MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
Handle<Object> receiver,
@@ -227,6 +236,26 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
return array;
}
+// static
+Handle<JSFunction> JSTypedArray::DefaultConstructor(
+ Isolate* isolate, Handle<JSTypedArray> exemplar) {
+ Handle<JSFunction> default_ctor = isolate->uint8_array_fun();
+ switch (exemplar->type()) {
+#define TYPED_ARRAY_CTOR(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: { \
+ default_ctor = isolate->type##_array_fun(); \
+ break; \
+ }
+
+ TYPED_ARRAYS(TYPED_ARRAY_CTOR)
+#undef TYPED_ARRAY_CTOR
+ default:
+ UNREACHABLE();
+ }
+
+ return default_ctor;
+}
+
#ifdef VERIFY_HEAP
ACCESSORS(JSTypedArray, raw_length, Object, kLengthOffset)
#endif
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index a2d13a766d..806c275c8f 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_JS_ARRAY_H_
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -298,9 +299,12 @@ class JSTypedArray : public JSArrayBufferView {
Handle<JSArrayBuffer> GetBuffer();
+ inline bool HasJSTypedArrayPrototype(Isolate* isolate);
static inline MaybeHandle<JSTypedArray> Validate(Isolate* isolate,
Handle<Object> receiver,
const char* method_name);
+ static inline Handle<JSFunction> DefaultConstructor(
+ Isolate* isolate, Handle<JSTypedArray> exemplar);
// ES7 section 22.2.4.6 Create ( constructor, argumentList )
static MaybeHandle<JSTypedArray> Create(Isolate* isolate,
Handle<Object> default_ctor, int argc,
diff --git a/deps/v8/src/objects/js-collection-inl.h b/deps/v8/src/objects/js-collection-inl.h
new file mode 100644
index 0000000000..7ad24bcf12
--- /dev/null
+++ b/deps/v8/src/objects/js-collection-inl.h
@@ -0,0 +1,49 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_COLLECTION_INL_H_
+#define V8_OBJECTS_JS_COLLECTION_INL_H_
+
+#include "src/objects/js-collection.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+ACCESSORS(JSCollection, table, Object, kTableOffset)
+ACCESSORS(JSCollectionIterator, table, Object, kTableOffset)
+ACCESSORS(JSCollectionIterator, index, Object, kIndexOffset)
+
+ACCESSORS(JSWeakCollection, table, Object, kTableOffset)
+ACCESSORS(JSWeakCollection, next, Object, kNextOffset)
+
+TYPE_CHECKER(JSMap, JS_MAP_TYPE)
+TYPE_CHECKER(JSSet, JS_SET_TYPE)
+TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
+TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
+
+CAST_ACCESSOR(JSSet)
+CAST_ACCESSOR(JSSetIterator)
+CAST_ACCESSOR(JSMap)
+CAST_ACCESSOR(JSMapIterator)
+CAST_ACCESSOR(JSWeakCollection)
+CAST_ACCESSOR(JSWeakMap)
+CAST_ACCESSOR(JSWeakSet)
+
+Object* JSMapIterator::CurrentValue() {
+ OrderedHashMap* table(OrderedHashMap::cast(this->table()));
+ int index = Smi::ToInt(this->index());
+ Object* value = table->ValueAt(index);
+ DCHECK(!value->IsTheHole(table->GetIsolate()));
+ return value;
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_COLLECTION_INL_H_
diff --git a/deps/v8/src/objects/js-collection.h b/deps/v8/src/objects/js-collection.h
new file mode 100644
index 0000000000..0777ccf1bd
--- /dev/null
+++ b/deps/v8/src/objects/js-collection.h
@@ -0,0 +1,162 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_COLLECTION_H_
+#define V8_OBJECTS_JS_COLLECTION_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class JSCollection : public JSObject {
+ public:
+ // [table]: the backing hash table
+ DECL_ACCESSORS(table, Object)
+
+ static const int kTableOffset = JSObject::kHeaderSize;
+ static const int kSize = kTableOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSCollection);
+};
+
+// The JSSet describes EcmaScript Harmony sets
+class JSSet : public JSCollection {
+ public:
+ DECL_CAST(JSSet)
+
+ static void Initialize(Handle<JSSet> set, Isolate* isolate);
+ static void Clear(Handle<JSSet> set);
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSSet)
+ DECL_VERIFIER(JSSet)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSSet);
+};
+
+class JSSetIterator
+ : public OrderedHashTableIterator<JSSetIterator, OrderedHashSet> {
+ public:
+ // Dispatched behavior.
+ DECL_PRINTER(JSSetIterator)
+ DECL_VERIFIER(JSSetIterator)
+
+ DECL_CAST(JSSetIterator)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSSetIterator);
+};
+
+// The JSMap describes EcmaScript Harmony maps
+class JSMap : public JSCollection {
+ public:
+ DECL_CAST(JSMap)
+
+ static void Initialize(Handle<JSMap> map, Isolate* isolate);
+ static void Clear(Handle<JSMap> map);
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSMap)
+ DECL_VERIFIER(JSMap)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSMap);
+};
+
+class JSMapIterator
+ : public OrderedHashTableIterator<JSMapIterator, OrderedHashMap> {
+ public:
+ // Dispatched behavior.
+ DECL_PRINTER(JSMapIterator)
+ DECL_VERIFIER(JSMapIterator)
+
+ DECL_CAST(JSMapIterator)
+
+ // Returns the current value of the iterator. This should only be called when
+ // |HasMore| returns true.
+ inline Object* CurrentValue();
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSMapIterator);
+};
+
+// Base class for both JSWeakMap and JSWeakSet
+class JSWeakCollection : public JSObject {
+ public:
+ DECL_CAST(JSWeakCollection)
+
+ // [table]: the backing hash table mapping keys to values.
+ DECL_ACCESSORS(table, Object)
+
+ // [next]: linked list of encountered weak maps during GC.
+ DECL_ACCESSORS(next, Object)
+
+ static void Initialize(Handle<JSWeakCollection> collection, Isolate* isolate);
+ static void Set(Handle<JSWeakCollection> collection, Handle<Object> key,
+ Handle<Object> value, int32_t hash);
+ static bool Delete(Handle<JSWeakCollection> collection, Handle<Object> key,
+ int32_t hash);
+ static Handle<JSArray> GetEntries(Handle<JSWeakCollection> holder,
+ int max_entries);
+
+ static const int kTableOffset = JSObject::kHeaderSize;
+ static const int kNextOffset = kTableOffset + kPointerSize;
+ static const int kSize = kNextOffset + kPointerSize;
+
+ // Visiting policy defines whether the table and next collection fields
+ // should be visited or not.
+ enum BodyVisitingPolicy { kIgnoreWeakness, kRespectWeakness };
+
+ // Iterates the object according to the visiting policy.
+ template <BodyVisitingPolicy>
+ class BodyDescriptorImpl;
+
+ // Visit the whole object.
+ typedef BodyDescriptorImpl<kIgnoreWeakness> BodyDescriptor;
+
+ // Don't visit table and next collection fields.
+ typedef BodyDescriptorImpl<kRespectWeakness> BodyDescriptorWeak;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakCollection);
+};
+
+// The JSWeakMap describes EcmaScript Harmony weak maps
+class JSWeakMap : public JSWeakCollection {
+ public:
+ DECL_CAST(JSWeakMap)
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSWeakMap)
+ DECL_VERIFIER(JSWeakMap)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakMap);
+};
+
+// The JSWeakSet describes EcmaScript Harmony weak sets
+class JSWeakSet : public JSWeakCollection {
+ public:
+ DECL_CAST(JSWeakSet)
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSWeakSet)
+ DECL_VERIFIER(JSWeakSet)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakSet);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_COLLECTION_H_
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index 32c07e879e..69cd5c3104 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -144,13 +144,20 @@ DEFINE_OPERATORS_FOR_FLAGS(JSRegExp::Flags)
// After creation the result must be treated as a JSArray in all regards.
class JSRegExpResult : public JSArray {
public:
- // Offsets of object fields.
- static const int kIndexOffset = JSArray::kSize;
- static const int kInputOffset = kIndexOffset + kPointerSize;
- static const int kSize = kInputOffset + kPointerSize;
+#define REG_EXP_RESULT_FIELDS(V) \
+ V(kIndexOffset, kPointerSize) \
+ V(kInputOffset, kPointerSize) \
+ V(kGroupsOffset, kPointerSize) \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSArray::kSize, REG_EXP_RESULT_FIELDS)
+#undef REG_EXP_RESULT_FIELDS
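  // Expansion note (illustrative, assuming the usual accumulating behavior of
  // DEFINE_FIELD_OFFSET_CONSTANTS): kIndexOffset == JSArray::kSize,
  // kInputOffset == kIndexOffset + kPointerSize, kGroupsOffset == kInputOffset +
  // kPointerSize, and kSize == kGroupsOffset + kPointerSize.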
+
// Indices of in-object properties.
static const int kIndexIndex = 0;
static const int kInputIndex = 1;
+ static const int kGroupsIndex = 2;
+ static const int kInObjectPropertyCount = 3;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSRegExpResult);
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
index 6fe34ffa8a..7fb0c712f2 100644
--- a/deps/v8/src/objects/literal-objects.h
+++ b/deps/v8/src/objects/literal-objects.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_LITERAL_OBJECTS_H_
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index a5421a32ca..c78f947b3a 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -5,9 +5,19 @@
#ifndef V8_OBJECTS_MAP_INL_H_
#define V8_OBJECTS_MAP_INL_H_
-#include "src/field-type.h"
#include "src/objects/map.h"
+#include "src/field-type.h"
+#include "src/objects-inl.h"
+#include "src/objects/descriptor-array.h"
+#include "src/objects/shared-function-info.h"
+#include "src/property.h"
+#include "src/transitions.h"
+
+// For pulling in heap/incremental-marking.h which is needed by
+// ACCESSORS_CHECKED.
+#include "src/heap/heap-inl.h"
+
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -16,6 +26,48 @@ namespace internal {
CAST_ACCESSOR(Map)
+ACCESSORS(Map, instance_descriptors, DescriptorArray, kDescriptorsOffset)
+ACCESSORS_CHECKED(Map, layout_descriptor, LayoutDescriptor,
+ kLayoutDescriptorOffset, FLAG_unbox_double_fields)
+ACCESSORS(Map, raw_transitions, Object, kTransitionsOrPrototypeInfoOffset)
+
+// |bit_field| fields.
+BIT_FIELD_ACCESSORS(Map, bit_field, has_non_instance_prototype,
+ Map::HasNonInstancePrototypeBit)
+BIT_FIELD_ACCESSORS(Map, bit_field, is_callable, Map::IsCallableBit)
+BIT_FIELD_ACCESSORS(Map, bit_field, has_named_interceptor,
+ Map::HasNamedInterceptorBit)
+BIT_FIELD_ACCESSORS(Map, bit_field, has_indexed_interceptor,
+ Map::HasIndexedInterceptorBit)
+BIT_FIELD_ACCESSORS(Map, bit_field, is_undetectable, Map::IsUndetectableBit)
+BIT_FIELD_ACCESSORS(Map, bit_field, is_access_check_needed,
+ Map::IsAccessCheckNeededBit)
+BIT_FIELD_ACCESSORS(Map, bit_field, is_constructor, Map::IsConstructorBit)
+BIT_FIELD_ACCESSORS(Map, bit_field, has_prototype_slot,
+ Map::HasPrototypeSlotBit)
+
+// |bit_field2| fields.
+BIT_FIELD_ACCESSORS(Map, bit_field2, is_extensible, Map::IsExtensibleBit)
+BIT_FIELD_ACCESSORS(Map, bit_field2, is_prototype_map, Map::IsPrototypeMapBit)
+
+// |bit_field3| fields.
+BIT_FIELD_ACCESSORS(Map, bit_field3, owns_descriptors, Map::OwnsDescriptorsBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, has_hidden_prototype,
+ Map::HasHiddenPrototypeBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, is_migration_target,
+ Map::IsMigrationTargetBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, is_immutable_proto,
+ Map::IsImmutablePrototypeBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, new_target_is_base,
+ Map::NewTargetIsBaseBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, may_have_interesting_symbols,
+ Map::MayHaveInterestingSymbolsBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, construction_counter,
+ Map::ConstructionCounterBits)
+
+TYPE_CHECKER(Map, MAP_TYPE)
+
InterceptorInfo* Map::GetNamedInterceptor() {
DCHECK(has_named_interceptor());
FunctionTemplateInfo* info = GetFunctionTemplateInfo();
@@ -75,6 +127,597 @@ void Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
}
}
+bool Map::IsUnboxedDoubleField(FieldIndex index) const {
+ if (!FLAG_unbox_double_fields) return false;
+ if (index.is_hidden_field() || !index.is_inobject()) return false;
+ return !layout_descriptor()->IsTagged(index.property_index());
+}
+
+bool Map::TooManyFastProperties(StoreFromKeyed store_mode) const {
+ if (UnusedPropertyFields() != 0) return false;
+ if (is_prototype_map()) return false;
+ int minimum = store_mode == CERTAINLY_NOT_STORE_FROM_KEYED ? 128 : 12;
+ int limit = Max(minimum, GetInObjectProperties());
+ int external = NumberOfFields() - GetInObjectProperties();
+ return external > limit;
+}
+
+PropertyDetails Map::GetLastDescriptorDetails() const {
+ return instance_descriptors()->GetDetails(LastAdded());
+}
+
+int Map::LastAdded() const {
+ int number_of_own_descriptors = NumberOfOwnDescriptors();
+ DCHECK_GT(number_of_own_descriptors, 0);
+ return number_of_own_descriptors - 1;
+}
+
+int Map::NumberOfOwnDescriptors() const {
+ return NumberOfOwnDescriptorsBits::decode(bit_field3());
+}
+
+void Map::SetNumberOfOwnDescriptors(int number) {
+ DCHECK_LE(number, instance_descriptors()->number_of_descriptors());
+ CHECK_LE(static_cast<unsigned>(number),
+ static_cast<unsigned>(kMaxNumberOfDescriptors));
+ set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number));
+}
+
+int Map::EnumLength() const { return EnumLengthBits::decode(bit_field3()); }
+
+void Map::SetEnumLength(int length) {
+ if (length != kInvalidEnumCacheSentinel) {
+ DCHECK_LE(length, NumberOfOwnDescriptors());
+ CHECK_LE(static_cast<unsigned>(length),
+ static_cast<unsigned>(kMaxNumberOfDescriptors));
+ }
+ set_bit_field3(EnumLengthBits::update(bit_field3(), length));
+}
+
+FixedArrayBase* Map::GetInitialElements() const {
+ FixedArrayBase* result = nullptr;
+ if (has_fast_elements() || has_fast_string_wrapper_elements()) {
+ result = GetHeap()->empty_fixed_array();
+ } else if (has_fast_sloppy_arguments_elements()) {
+ result = GetHeap()->empty_sloppy_arguments_elements();
+ } else if (has_fixed_typed_array_elements()) {
+ result = GetHeap()->EmptyFixedTypedArrayForMap(this);
+ } else if (has_dictionary_elements()) {
+ result = GetHeap()->empty_slow_element_dictionary();
+ } else {
+ UNREACHABLE();
+ }
+ DCHECK(!GetHeap()->InNewSpace(result));
+ return result;
+}
+
+VisitorId Map::visitor_id() const {
+ return static_cast<VisitorId>(
+ RELAXED_READ_BYTE_FIELD(this, kVisitorIdOffset));
+}
+
+void Map::set_visitor_id(VisitorId id) {
+ CHECK_LT(static_cast<unsigned>(id), 256);
+ RELAXED_WRITE_BYTE_FIELD(this, kVisitorIdOffset, static_cast<byte>(id));
+}
+
+int Map::instance_size_in_words() const {
+ return RELAXED_READ_BYTE_FIELD(this, kInstanceSizeInWordsOffset);
+}
+
+void Map::set_instance_size_in_words(int value) {
+ RELAXED_WRITE_BYTE_FIELD(this, kInstanceSizeInWordsOffset,
+ static_cast<byte>(value));
+}
+
+int Map::instance_size() const {
+ return instance_size_in_words() << kPointerSizeLog2;
+}
+
+void Map::set_instance_size(int value) {
+ CHECK_EQ(0, value & (kPointerSize - 1));
+ value >>= kPointerSizeLog2;
+ CHECK_LT(static_cast<unsigned>(value), 256);
+ set_instance_size_in_words(value);
+}
+
+int Map::inobject_properties_start_or_constructor_function_index() const {
+ return RELAXED_READ_BYTE_FIELD(
+ this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset);
+}
+
+void Map::set_inobject_properties_start_or_constructor_function_index(
+ int value) {
+ CHECK_LT(static_cast<unsigned>(value), 256);
+ RELAXED_WRITE_BYTE_FIELD(
+ this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
+ static_cast<byte>(value));
+}
+
+int Map::GetInObjectPropertiesStartInWords() const {
+ DCHECK(IsJSObjectMap());
+ return inobject_properties_start_or_constructor_function_index();
+}
+
+void Map::SetInObjectPropertiesStartInWords(int value) {
+ CHECK(IsJSObjectMap());
+ set_inobject_properties_start_or_constructor_function_index(value);
+}
+
+int Map::GetInObjectProperties() const {
+ DCHECK(IsJSObjectMap());
+ return instance_size_in_words() - GetInObjectPropertiesStartInWords();
+}
+
+int Map::GetConstructorFunctionIndex() const {
+ DCHECK(IsPrimitiveMap());
+ return inobject_properties_start_or_constructor_function_index();
+}
+
+void Map::SetConstructorFunctionIndex(int value) {
+ CHECK(IsPrimitiveMap());
+ set_inobject_properties_start_or_constructor_function_index(value);
+}
+
+int Map::GetInObjectPropertyOffset(int index) const {
+ return (GetInObjectPropertiesStartInWords() + index) * kPointerSize;
+}
+
+Handle<Map> Map::AddMissingTransitionsForTesting(
+ Handle<Map> split_map, Handle<DescriptorArray> descriptors,
+ Handle<LayoutDescriptor> full_layout_descriptor) {
+ return AddMissingTransitions(split_map, descriptors, full_layout_descriptor);
+}
+
+InstanceType Map::instance_type() const {
+ return static_cast<InstanceType>(
+ READ_UINT16_FIELD(this, kInstanceTypeOffset));
+}
+
+void Map::set_instance_type(InstanceType value) {
+ WRITE_UINT16_FIELD(this, kInstanceTypeOffset, value);
+}
+
+int Map::UnusedPropertyFields() const {
+ int value = used_or_unused_instance_size_in_words();
+ DCHECK_IMPLIES(!IsJSObjectMap(), value == 0);
+ int unused;
+ if (value >= JSObject::kFieldsAdded) {
+ unused = instance_size_in_words() - value;
+ } else {
+ // For out-of-object properties, the "used_or_unused_instance_size_in_words"
+ // byte encodes the slack in the property array.
+ unused = value;
+ }
+ return unused;
+}
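  // Worked example (illustrative, assuming JSObject::kFieldsAdded == 3, i.e. a
  // three-word JSObject header): for a JSObject map with
  // instance_size_in_words() == 10, a stored value of 8 is >= kFieldsAdded, so
  // 10 - 8 == 2 in-object fields are unused; a stored value of 2 (< kFieldsAdded)
  // instead means the out-of-object property array has 2 slack slots.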
+
+int Map::used_or_unused_instance_size_in_words() const {
+ return RELAXED_READ_BYTE_FIELD(this, kUsedOrUnusedInstanceSizeInWordsOffset);
+}
+
+void Map::set_used_or_unused_instance_size_in_words(int value) {
+ CHECK_LE(static_cast<unsigned>(value), 255);
+ RELAXED_WRITE_BYTE_FIELD(this, kUsedOrUnusedInstanceSizeInWordsOffset,
+ static_cast<byte>(value));
+}
+
+int Map::UsedInstanceSize() const {
+ int words = used_or_unused_instance_size_in_words();
+ if (words < JSObject::kFieldsAdded) {
+ // All in-object properties are used, and this value tracks the slack
+ // in the property array.
+ return instance_size();
+ }
+ return words * kPointerSize;
+}
+
+void Map::SetInObjectUnusedPropertyFields(int value) {
+ STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kPointerSize);
+ if (!IsJSObjectMap()) {
+ CHECK_EQ(0, value);
+ set_used_or_unused_instance_size_in_words(0);
+ DCHECK_EQ(0, UnusedPropertyFields());
+ return;
+ }
+ CHECK_LE(0, value);
+ DCHECK_LE(value, GetInObjectProperties());
+ int used_inobject_properties = GetInObjectProperties() - value;
+ set_used_or_unused_instance_size_in_words(
+ GetInObjectPropertyOffset(used_inobject_properties) / kPointerSize);
+ DCHECK_EQ(value, UnusedPropertyFields());
+}
+
+void Map::SetOutOfObjectUnusedPropertyFields(int value) {
+ STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kPointerSize);
+ CHECK_LT(static_cast<unsigned>(value), JSObject::kFieldsAdded);
+ // For out-of-object properties, the "used_or_unused_instance_size_in_words"
+ // byte encodes the slack in the property array.
+ set_used_or_unused_instance_size_in_words(value);
+ DCHECK_EQ(value, UnusedPropertyFields());
+}
+
+void Map::CopyUnusedPropertyFields(Map* map) {
+ set_used_or_unused_instance_size_in_words(
+ map->used_or_unused_instance_size_in_words());
+ DCHECK_EQ(UnusedPropertyFields(), map->UnusedPropertyFields());
+}
+
+void Map::AccountAddedPropertyField() {
+ // Update the used instance size and the number of unused property fields.
+ STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kPointerSize);
+#ifdef DEBUG
+ int new_unused = UnusedPropertyFields() - 1;
+ if (new_unused < 0) new_unused += JSObject::kFieldsAdded;
+#endif
+ int value = used_or_unused_instance_size_in_words();
+ if (value >= JSObject::kFieldsAdded) {
+ if (value == instance_size_in_words()) {
+ AccountAddedOutOfObjectPropertyField(0);
+ } else {
+ // The property is added in-object, so simply increment the counter.
+ set_used_or_unused_instance_size_in_words(value + 1);
+ }
+ } else {
+ AccountAddedOutOfObjectPropertyField(value);
+ }
+ DCHECK_EQ(new_unused, UnusedPropertyFields());
+}
+
+void Map::AccountAddedOutOfObjectPropertyField(int unused_in_property_array) {
+ unused_in_property_array--;
+ if (unused_in_property_array < 0) {
+ unused_in_property_array += JSObject::kFieldsAdded;
+ }
+ CHECK_LT(static_cast<unsigned>(unused_in_property_array),
+ JSObject::kFieldsAdded);
+ set_used_or_unused_instance_size_in_words(unused_in_property_array);
+ DCHECK_EQ(unused_in_property_array, UnusedPropertyFields());
+}
+
+byte Map::bit_field() const { return READ_BYTE_FIELD(this, kBitFieldOffset); }
+
+void Map::set_bit_field(byte value) {
+ WRITE_BYTE_FIELD(this, kBitFieldOffset, value);
+}
+
+byte Map::bit_field2() const { return READ_BYTE_FIELD(this, kBitField2Offset); }
+
+void Map::set_bit_field2(byte value) {
+ WRITE_BYTE_FIELD(this, kBitField2Offset, value);
+}
+
+bool Map::is_abandoned_prototype_map() const {
+ return is_prototype_map() && !owns_descriptors();
+}
+
+bool Map::should_be_fast_prototype_map() const {
+ if (!prototype_info()->IsPrototypeInfo()) return false;
+ return PrototypeInfo::cast(prototype_info())->should_be_fast_map();
+}
+
+void Map::set_elements_kind(ElementsKind elements_kind) {
+ CHECK_LT(static_cast<int>(elements_kind), kElementsKindCount);
+ set_bit_field2(Map::ElementsKindBits::update(bit_field2(), elements_kind));
+}
+
+ElementsKind Map::elements_kind() const {
+ return Map::ElementsKindBits::decode(bit_field2());
+}
+
+bool Map::has_fast_smi_elements() const {
+ return IsSmiElementsKind(elements_kind());
+}
+
+bool Map::has_fast_object_elements() const {
+ return IsObjectElementsKind(elements_kind());
+}
+
+bool Map::has_fast_smi_or_object_elements() const {
+ return IsSmiOrObjectElementsKind(elements_kind());
+}
+
+bool Map::has_fast_double_elements() const {
+ return IsDoubleElementsKind(elements_kind());
+}
+
+bool Map::has_fast_elements() const {
+ return IsFastElementsKind(elements_kind());
+}
+
+bool Map::has_sloppy_arguments_elements() const {
+ return IsSloppyArgumentsElementsKind(elements_kind());
+}
+
+bool Map::has_fast_sloppy_arguments_elements() const {
+ return elements_kind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
+}
+
+bool Map::has_fast_string_wrapper_elements() const {
+ return elements_kind() == FAST_STRING_WRAPPER_ELEMENTS;
+}
+
+bool Map::has_fixed_typed_array_elements() const {
+ return IsFixedTypedArrayElementsKind(elements_kind());
+}
+
+bool Map::has_dictionary_elements() const {
+ return IsDictionaryElementsKind(elements_kind());
+}
+
+void Map::set_is_dictionary_map(bool value) {
+ uint32_t new_bit_field3 = IsDictionaryMapBit::update(bit_field3(), value);
+ new_bit_field3 = IsUnstableBit::update(new_bit_field3, value);
+ set_bit_field3(new_bit_field3);
+}
+
+bool Map::is_dictionary_map() const {
+ return IsDictionaryMapBit::decode(bit_field3());
+}
+
+void Map::mark_unstable() {
+ set_bit_field3(IsUnstableBit::update(bit_field3(), true));
+}
+
+bool Map::is_stable() const { return !IsUnstableBit::decode(bit_field3()); }
+
+bool Map::CanBeDeprecated() const {
+ int descriptor = LastAdded();
+ for (int i = 0; i <= descriptor; i++) {
+ PropertyDetails details = instance_descriptors()->GetDetails(i);
+ if (details.representation().IsNone()) return true;
+ if (details.representation().IsSmi()) return true;
+ if (details.representation().IsDouble()) return true;
+ if (details.representation().IsHeapObject()) return true;
+ if (details.kind() == kData && details.location() == kDescriptor) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void Map::NotifyLeafMapLayoutChange() {
+ if (is_stable()) {
+ mark_unstable();
+ dependent_code()->DeoptimizeDependentCodeGroup(
+ GetIsolate(), DependentCode::kPrototypeCheckGroup);
+ }
+}
+
+bool Map::CanTransition() const {
+ // Only JSObject and subtypes have map transitions and back pointers.
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
+ return instance_type() >= FIRST_JS_OBJECT_TYPE;
+}
+
+bool Map::IsBooleanMap() const { return this == GetHeap()->boolean_map(); }
+bool Map::IsPrimitiveMap() const {
+ STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
+ return instance_type() <= LAST_PRIMITIVE_TYPE;
+}
+bool Map::IsJSReceiverMap() const {
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ return instance_type() >= FIRST_JS_RECEIVER_TYPE;
+}
+bool Map::IsJSObjectMap() const {
+ STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ return instance_type() >= FIRST_JS_OBJECT_TYPE;
+}
+bool Map::IsJSArrayMap() const { return instance_type() == JS_ARRAY_TYPE; }
+bool Map::IsJSFunctionMap() const {
+ return instance_type() == JS_FUNCTION_TYPE;
+}
+bool Map::IsStringMap() const { return instance_type() < FIRST_NONSTRING_TYPE; }
+bool Map::IsJSProxyMap() const { return instance_type() == JS_PROXY_TYPE; }
+bool Map::IsJSGlobalProxyMap() const {
+ return instance_type() == JS_GLOBAL_PROXY_TYPE;
+}
+bool Map::IsJSGlobalObjectMap() const {
+ return instance_type() == JS_GLOBAL_OBJECT_TYPE;
+}
+bool Map::IsJSTypedArrayMap() const {
+ return instance_type() == JS_TYPED_ARRAY_TYPE;
+}
+bool Map::IsJSDataViewMap() const {
+ return instance_type() == JS_DATA_VIEW_TYPE;
+}
+
+Object* Map::prototype() const { return READ_FIELD(this, kPrototypeOffset); }
+
+void Map::set_prototype(Object* value, WriteBarrierMode mode) {
+ DCHECK(value->IsNull(GetIsolate()) || value->IsJSReceiver());
+ WRITE_FIELD(this, kPrototypeOffset, value);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode);
+}
+
+LayoutDescriptor* Map::layout_descriptor_gc_safe() const {
+ DCHECK(FLAG_unbox_double_fields);
+ Object* layout_desc = RELAXED_READ_FIELD(this, kLayoutDescriptorOffset);
+ return LayoutDescriptor::cast_gc_safe(layout_desc);
+}
+
+bool Map::HasFastPointerLayout() const {
+ DCHECK(FLAG_unbox_double_fields);
+ Object* layout_desc = RELAXED_READ_FIELD(this, kLayoutDescriptorOffset);
+ return LayoutDescriptor::IsFastPointerLayout(layout_desc);
+}
+
+void Map::UpdateDescriptors(DescriptorArray* descriptors,
+ LayoutDescriptor* layout_desc) {
+ set_instance_descriptors(descriptors);
+ if (FLAG_unbox_double_fields) {
+ if (layout_descriptor()->IsSlowLayout()) {
+ set_layout_descriptor(layout_desc);
+ }
+#ifdef VERIFY_HEAP
+ // TODO(ishell): remove these checks from VERIFY_HEAP mode.
+ if (FLAG_verify_heap) {
+ CHECK(layout_descriptor()->IsConsistentWithMap(this));
+ CHECK_EQ(Map::GetVisitorId(this), visitor_id());
+ }
+#else
+ SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(this));
+ DCHECK(visitor_id() == Map::GetVisitorId(this));
+#endif
+ }
+}
+
+void Map::InitializeDescriptors(DescriptorArray* descriptors,
+ LayoutDescriptor* layout_desc) {
+ int len = descriptors->number_of_descriptors();
+ set_instance_descriptors(descriptors);
+ SetNumberOfOwnDescriptors(len);
+
+ if (FLAG_unbox_double_fields) {
+ set_layout_descriptor(layout_desc);
+#ifdef VERIFY_HEAP
+ // TODO(ishell): remove these checks from VERIFY_HEAP mode.
+ if (FLAG_verify_heap) {
+ CHECK(layout_descriptor()->IsConsistentWithMap(this));
+ }
+#else
+ SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(this));
+#endif
+ set_visitor_id(Map::GetVisitorId(this));
+ }
+}
+
+void Map::set_bit_field3(uint32_t bits) {
+ if (kInt32Size != kPointerSize) {
+ WRITE_UINT32_FIELD(this, kBitField3Offset + kInt32Size, 0);
+ }
+ WRITE_UINT32_FIELD(this, kBitField3Offset, bits);
+}
+
+uint32_t Map::bit_field3() const {
+ return READ_UINT32_FIELD(this, kBitField3Offset);
+}
+
+LayoutDescriptor* Map::GetLayoutDescriptor() const {
+ return FLAG_unbox_double_fields ? layout_descriptor()
+ : LayoutDescriptor::FastPointerLayout();
+}
+
+void Map::AppendDescriptor(Descriptor* desc) {
+ DescriptorArray* descriptors = instance_descriptors();
+ int number_of_own_descriptors = NumberOfOwnDescriptors();
+ DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
+ descriptors->Append(desc);
+ SetNumberOfOwnDescriptors(number_of_own_descriptors + 1);
+
+ // Properly mark the map if the {desc} is an "interesting symbol".
+ if (desc->GetKey()->IsInterestingSymbol()) {
+ set_may_have_interesting_symbols(true);
+ }
+ PropertyDetails details = desc->GetDetails();
+ if (details.location() == kField) {
+ DCHECK_GT(UnusedPropertyFields(), 0);
+ AccountAddedPropertyField();
+ }
+
+// This function does not support appending double field descriptors; callers
+// should never try to (otherwise, the layout descriptor must be updated too).
+#ifdef DEBUG
+ DCHECK(details.location() != kField || !details.representation().IsDouble());
+#endif
+}
+
+Object* Map::GetBackPointer() const {
+ Object* object = constructor_or_backpointer();
+ if (object->IsMap()) {
+ return object;
+ }
+ return GetIsolate()->heap()->undefined_value();
+}
+
+Map* Map::ElementsTransitionMap() {
+ DisallowHeapAllocation no_gc;
+ return TransitionsAccessor(this, &no_gc)
+ .SearchSpecial(GetHeap()->elements_transition_symbol());
+}
+
+Object* Map::prototype_info() const {
+ DCHECK(is_prototype_map());
+ return READ_FIELD(this, Map::kTransitionsOrPrototypeInfoOffset);
+}
+
+void Map::set_prototype_info(Object* value, WriteBarrierMode mode) {
+ CHECK(is_prototype_map());
+ WRITE_FIELD(this, Map::kTransitionsOrPrototypeInfoOffset, value);
+ CONDITIONAL_WRITE_BARRIER(
+ GetHeap(), this, Map::kTransitionsOrPrototypeInfoOffset, value, mode);
+}
+
+void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
+ CHECK_GE(instance_type(), FIRST_JS_RECEIVER_TYPE);
+ CHECK(value->IsMap());
+ CHECK(GetBackPointer()->IsUndefined(GetIsolate()));
+ CHECK_IMPLIES(value->IsMap(), Map::cast(value)->GetConstructor() ==
+ constructor_or_backpointer());
+ set_constructor_or_backpointer(value, mode);
+}
+
+ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
+ACCESSORS(Map, weak_cell_cache, Object, kWeakCellCacheOffset)
+ACCESSORS(Map, constructor_or_backpointer, Object,
+ kConstructorOrBackPointerOffset)
+
+Object* Map::GetConstructor() const {
+ Object* maybe_constructor = constructor_or_backpointer();
+ // Follow any back pointers.
+ while (maybe_constructor->IsMap()) {
+ maybe_constructor =
+ Map::cast(maybe_constructor)->constructor_or_backpointer();
+ }
+ return maybe_constructor;
+}
+
+FunctionTemplateInfo* Map::GetFunctionTemplateInfo() const {
+ Object* constructor = GetConstructor();
+ if (constructor->IsJSFunction()) {
+ DCHECK(JSFunction::cast(constructor)->shared()->IsApiFunction());
+ return JSFunction::cast(constructor)->shared()->get_api_func_data();
+ }
+ DCHECK(constructor->IsFunctionTemplateInfo());
+ return FunctionTemplateInfo::cast(constructor);
+}
+
+void Map::SetConstructor(Object* constructor, WriteBarrierMode mode) {
+ // Never overwrite a back pointer with a constructor.
+ CHECK(!constructor_or_backpointer()->IsMap());
+ set_constructor_or_backpointer(constructor, mode);
+}
+
+Handle<Map> Map::CopyInitialMap(Handle<Map> map) {
+ return CopyInitialMap(map, map->instance_size(), map->GetInObjectProperties(),
+ map->UnusedPropertyFields());
+}
+
+bool Map::IsInobjectSlackTrackingInProgress() const {
+ return construction_counter() != Map::kNoSlackTracking;
+}
+
+void Map::InobjectSlackTrackingStep() {
+ // Slack tracking should only be performed on an initial map.
+ DCHECK(GetBackPointer()->IsUndefined(GetIsolate()));
+ if (!IsInobjectSlackTrackingInProgress()) return;
+ int counter = construction_counter();
+ set_construction_counter(counter - 1);
+ if (counter == kSlackTrackingCounterEnd) {
+ CompleteInobjectSlackTracking();
+ }
+}
+
+int Map::SlackForArraySize(int old_size, int size_limit) {
+ const int max_slack = size_limit - old_size;
+ CHECK_LE(0, max_slack);
+ if (old_size < 4) {
+ DCHECK_LE(1, max_slack);
+ return 1;
+ }
+ return Min(max_slack, old_size / 4);
+}
+
int NormalizedMapCache::GetIndex(Handle<Map> map) {
return map->Hash() % NormalizedMapCache::kEntries;
}
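
The map-inl.h additions above wire the construction counter into in-object slack tracking: the counter starts at kSlackTrackingCounterStart, every constructed instance decrements it, tracking completes once it reaches kSlackTrackingCounterEnd, and SlackForArraySize caps backing-store growth at a quarter of the old size. Below is a minimal standalone sketch of that counting scheme in plain C++; SlackTracker and the printf calls are invented for illustration and are not V8 types.

    #include <algorithm>
    #include <cassert>
    #include <cstdio>

    // Standalone sketch of the counter-driven slack tracking shown above.
    struct SlackTracker {
      static constexpr int kStart = 7;  // kSlackTrackingCounterStart
      static constexpr int kEnd = 1;    // kSlackTrackingCounterEnd
      static constexpr int kNone = 0;   // kNoSlackTracking

      int counter = kStart;

      bool InProgress() const { return counter != kNone; }

      // One constructed instance == one step, like InobjectSlackTrackingStep().
      void Step() {
        if (!InProgress()) return;
        int c = counter;
        counter = c - 1;
        if (c == kEnd) std::printf("tracking complete, shrink instances now\n");
      }
    };

    // Mirrors SlackForArraySize(): small backing stores grow by one element,
    // larger ones by a quarter of their current size, capped by the limit.
    int SlackForArraySize(int old_size, int size_limit) {
      const int max_slack = size_limit - old_size;
      assert(max_slack >= 0);
      if (old_size < 4) {
        assert(max_slack >= 1);
        return 1;
      }
      return std::min(max_slack, old_size / 4);
    }

    int main() {
      SlackTracker tracker;
      for (int i = 0; i < 10; ++i) tracker.Step();
      assert(!tracker.InProgress());
      std::printf("slack for an array of 16 (limit 255): %d\n",
                  SlackForArraySize(16, 255));
    }
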
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index d9a0a73158..bf0d843884 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -194,8 +194,7 @@ class Map : public HeapObject {
inline InterceptorInfo* GetIndexedInterceptor();
// Instance type.
- inline InstanceType instance_type() const;
- inline void set_instance_type(InstanceType value);
+ DECL_PRIMITIVE_ACCESSORS(instance_type, InstanceType)
// Returns the size of the used in-object area including object header
// (only used for JSObject in fast mode, for the other kinds of objects it
@@ -214,50 +213,69 @@ class Map : public HeapObject {
inline void AccountAddedOutOfObjectPropertyField(
int unused_in_property_array);
+ //
// Bit field.
- inline byte bit_field() const;
- inline void set_bit_field(byte value);
+ //
+ DECL_PRIMITIVE_ACCESSORS(bit_field, byte)
+
+// Bit positions for |bit_field|.
+#define MAP_BIT_FIELD_FIELDS(V, _) \
+ V(HasNonInstancePrototypeBit, bool, 1, _) \
+ V(IsCallableBit, bool, 1, _) \
+ V(HasNamedInterceptorBit, bool, 1, _) \
+ V(HasIndexedInterceptorBit, bool, 1, _) \
+ V(IsUndetectableBit, bool, 1, _) \
+ V(IsAccessCheckNeededBit, bool, 1, _) \
+ V(IsConstructorBit, bool, 1, _) \
+ V(HasPrototypeSlotBit, bool, 1, _)
+
+ DEFINE_BIT_FIELDS(MAP_BIT_FIELD_FIELDS)
+#undef MAP_BIT_FIELD_FIELDS
+ //
// Bit field 2.
- inline byte bit_field2() const;
- inline void set_bit_field2(byte value);
+ //
+ DECL_PRIMITIVE_ACCESSORS(bit_field2, byte)
+// Bit positions for |bit_field2|.
+#define MAP_BIT_FIELD2_FIELDS(V, _) \
+ /* One bit is still free here. */ \
+ V(IsExtensibleBit, bool, 1, _) \
+ V(IsPrototypeMapBit, bool, 1, _) \
+ V(ElementsKindBits, ElementsKind, 5, _)
+
+ DEFINE_BIT_FIELDS(MAP_BIT_FIELD2_FIELDS)
+#undef MAP_BIT_FIELD2_FIELDS
+
+ //
// Bit field 3.
- inline uint32_t bit_field3() const;
- inline void set_bit_field3(uint32_t bits);
-
- class EnumLengthBits : public BitField<int, 0, kDescriptorIndexBitCount> {
- }; // NOLINT
- class NumberOfOwnDescriptorsBits
- : public BitField<int, kDescriptorIndexBitCount,
- kDescriptorIndexBitCount> {}; // NOLINT
- STATIC_ASSERT(kDescriptorIndexBitCount + kDescriptorIndexBitCount == 20);
- class DictionaryMap : public BitField<bool, 20, 1> {};
- class OwnsDescriptors : public BitField<bool, 21, 1> {};
- class HasHiddenPrototype : public BitField<bool, 22, 1> {};
- class Deprecated : public BitField<bool, 23, 1> {};
- class IsUnstable : public BitField<bool, 24, 1> {};
- class IsMigrationTarget : public BitField<bool, 25, 1> {};
- class ImmutablePrototype : public BitField<bool, 26, 1> {};
- class NewTargetIsBase : public BitField<bool, 27, 1> {};
- class MayHaveInterestingSymbols : public BitField<bool, 28, 1> {};
+ //
+ DECL_PRIMITIVE_ACCESSORS(bit_field3, uint32_t)
+
+// Bit positions for |bit_field3|.
+#define MAP_BIT_FIELD3_FIELDS(V, _) \
+ V(EnumLengthBits, int, kDescriptorIndexBitCount, _) \
+ V(NumberOfOwnDescriptorsBits, int, kDescriptorIndexBitCount, _) \
+ V(IsDictionaryMapBit, bool, 1, _) \
+ V(OwnsDescriptorsBit, bool, 1, _) \
+ V(HasHiddenPrototypeBit, bool, 1, _) \
+ V(IsDeprecatedBit, bool, 1, _) \
+ V(IsUnstableBit, bool, 1, _) \
+ V(IsMigrationTargetBit, bool, 1, _) \
+ V(IsImmutablePrototypeBit, bool, 1, _) \
+ V(NewTargetIsBaseBit, bool, 1, _) \
+ V(MayHaveInterestingSymbolsBit, bool, 1, _) \
+ V(ConstructionCounterBits, int, 3, _)
+
+ DEFINE_BIT_FIELDS(MAP_BIT_FIELD3_FIELDS)
+#undef MAP_BIT_FIELD3_FIELDS
STATIC_ASSERT(NumberOfOwnDescriptorsBits::kMax >= kMaxNumberOfDescriptors);
- // Keep this bit field at the very end for better code in
- // Builtins::kJSConstructStubGeneric stub.
- // This counter is used for in-object slack tracking.
- // The in-object slack tracking is considered enabled when the counter is
- // non zero. The counter only has a valid count for initial maps. For
- // transitioned maps only kNoSlackTracking has a meaning, namely that inobject
- // slack tracking already finished for the transition tree. Any other value
- // indicates that either inobject slack tracking is still in progress, or that
- // the map isn't part of the transition tree anymore.
- class ConstructionCounter : public BitField<int, 29, 3> {};
static const int kSlackTrackingCounterStart = 7;
static const int kSlackTrackingCounterEnd = 1;
static const int kNoSlackTracking = 0;
- STATIC_ASSERT(kSlackTrackingCounterStart <= ConstructionCounter::kMax);
+ STATIC_ASSERT(kSlackTrackingCounterStart <= ConstructionCounterBits::kMax);
// Inobject slack tracking is the way to reclaim unused inobject space.
//
@@ -310,8 +328,7 @@ class Map : public HeapObject {
// property is set to a value that is not a JSObject, the prototype
// property will not be used to create instances of the function.
// See ECMA-262, 13.2.2.
- inline void set_non_instance_prototype(bool value);
- inline bool has_non_instance_prototype() const;
+ DECL_BOOLEAN_ACCESSORS(has_non_instance_prototype)
// Tells whether the instance has a [[Construct]] internal method.
// This property is implemented according to ES6, section 7.2.4.
@@ -329,12 +346,10 @@ class Map : public HeapObject {
DECL_BOOLEAN_ACCESSORS(has_hidden_prototype)
// Records and queries whether the instance has a named interceptor.
- inline void set_has_named_interceptor();
- inline bool has_named_interceptor() const;
+ DECL_BOOLEAN_ACCESSORS(has_named_interceptor)
// Records and queries whether the instance has an indexed interceptor.
- inline void set_has_indexed_interceptor();
- inline bool has_indexed_interceptor() const;
+ DECL_BOOLEAN_ACCESSORS(has_indexed_interceptor)
// Tells whether the instance is undetectable.
// An undetectable object is a special class of JSObject: 'typeof' operator
@@ -342,21 +357,18 @@ class Map : public HeapObject {
// a normal JS object. It is useful for implementing undetectable
// document.all in Firefox & Safari.
// See https://bugzilla.mozilla.org/show_bug.cgi?id=248549.
- inline void set_is_undetectable();
- inline bool is_undetectable() const;
+ DECL_BOOLEAN_ACCESSORS(is_undetectable)
// Tells whether the instance has a [[Call]] internal method.
// This property is implemented according to ES6, section 7.2.3.
- inline void set_is_callable();
- inline bool is_callable() const;
+ DECL_BOOLEAN_ACCESSORS(is_callable)
DECL_BOOLEAN_ACCESSORS(new_target_is_base)
DECL_BOOLEAN_ACCESSORS(is_extensible)
DECL_BOOLEAN_ACCESSORS(is_prototype_map)
inline bool is_abandoned_prototype_map() const;
- inline void set_elements_kind(ElementsKind elements_kind);
- inline ElementsKind elements_kind() const;
+ DECL_PRIMITIVE_ACCESSORS(elements_kind, ElementsKind)
// Tells whether the instance has fast elements that are only Smis.
inline bool has_fast_smi_elements() const;
@@ -409,6 +421,8 @@ class Map : public HeapObject {
static const int kPrototypeChainValid = 0;
static const int kPrototypeChainInvalid = 1;
+ static bool IsPrototypeChainInvalidated(Map* map);
+
// Return the map of the root of object's prototype chain.
Map* GetPrototypeChainRootMap(Isolate* isolate) const;
@@ -489,13 +503,11 @@ class Map : public HeapObject {
// normalized objects, ie objects for which HasFastProperties returns false).
// A map can never be used for both dictionary mode and fast mode JSObjects.
// False by default and for HeapObjects that are not JSObjects.
- inline void set_dictionary_map(bool value);
- inline bool is_dictionary_map() const;
+ DECL_BOOLEAN_ACCESSORS(is_dictionary_map)
// Tells whether the instance needs security checks when accessing its
// properties.
- inline void set_is_access_check_needed(bool access_check_needed);
- inline bool is_access_check_needed() const;
+ DECL_BOOLEAN_ACCESSORS(is_access_check_needed)
// [prototype]: implicit prototype object.
DECL_ACCESSORS(prototype, Object)
@@ -563,15 +575,24 @@ class Map : public HeapObject {
inline void SetEnumLength(int length);
DECL_BOOLEAN_ACCESSORS(owns_descriptors)
+
inline void mark_unstable();
inline bool is_stable() const;
- inline void set_migration_target(bool value);
- inline bool is_migration_target() const;
- inline void set_immutable_proto(bool value);
- inline bool is_immutable_proto() const;
+
+ DECL_BOOLEAN_ACCESSORS(is_migration_target)
+
+ DECL_BOOLEAN_ACCESSORS(is_immutable_proto)
+
+ // This counter is used for in-object slack tracking.
+ // The in-object slack tracking is considered enabled when the counter is
+ // non zero. The counter only has a valid count for initial maps. For
+ // transitioned maps only kNoSlackTracking has a meaning, namely that inobject
+ // slack tracking already finished for the transition tree. Any other value
+ // indicates that either inobject slack tracking is still in progress, or that
+ // the map isn't part of the transition tree anymore.
DECL_INT_ACCESSORS(construction_counter)
- inline void deprecate();
- inline bool is_deprecated() const;
+
+ DECL_BOOLEAN_ACCESSORS(is_deprecated)
inline bool CanBeDeprecated() const;
// Returns a non-deprecated version of the input. If the input was not
// deprecated, it is directly returned. Otherwise, the non-deprecated version
@@ -759,22 +780,6 @@ class Map : public HeapObject {
STATIC_ASSERT(kInstanceTypeOffset == Internals::kMapInstanceTypeOffset);
- // Bit positions for bit field.
- static const int kHasNonInstancePrototype = 0;
- static const int kIsCallable = 1;
- static const int kHasNamedInterceptor = 2;
- static const int kHasIndexedInterceptor = 3;
- static const int kIsUndetectable = 4;
- static const int kIsAccessCheckNeeded = 5;
- static const int kIsConstructor = 6;
- static const int kHasPrototypeSlot = 7;
-
- // Bit positions for bit field 2
- static const int kIsExtensible = 0;
- // Bit 1 is free.
- class IsPrototypeMapBits : public BitField<bool, 2, 1> {};
- class ElementsKindBits : public BitField<ElementsKind, 3, 5> {};
-
typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
kPointerFieldsEndOffset, kSize>
BodyDescriptor;
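
The map.h hunk replaces the hand-numbered BitField subclasses with V-macro lists (MAP_BIT_FIELD*_FIELDS) that DEFINE_BIT_FIELDS expands into consecutively positioned fields. The compile-only sketch below uses a hand-rolled BitFieldSketch template rather than V8's macros; it shows the encode/decode/update shape such declarations provide and why chaining kNext removes the literal bit positions.

    #include <cassert>
    #include <cstdint>

    // Hand-rolled stand-in for BitField<T, shift, size>: encode, decode and
    // update a typed field packed into a 32-bit word.
    template <class T, int shift, int size>
    struct BitFieldSketch {
      static constexpr uint32_t kMask = ((uint32_t{1} << size) - 1) << shift;
      static constexpr int kNext = shift + size;  // where the next field starts
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> shift);
      }
      static uint32_t update(uint32_t word, T value) {
        return (word & ~kMask) | encode(value);
      }
    };

    // A few bit_field3 entries, written the way the V-macro list expands:
    // every field's position is the previous field's kNext, no literals.
    constexpr int kDescriptorIndexBitCount = 10;
    using EnumLengthBits =
        BitFieldSketch<int, 0, kDescriptorIndexBitCount>;
    using NumberOfOwnDescriptorsBits =
        BitFieldSketch<int, EnumLengthBits::kNext, kDescriptorIndexBitCount>;
    using IsDictionaryMapBit =
        BitFieldSketch<bool, NumberOfOwnDescriptorsBits::kNext, 1>;

    static_assert(IsDictionaryMapBit::kNext <= 32,
                  "all declared fields must fit the 32-bit word");

    int main() {
      uint32_t bit_field3 = 0;
      bit_field3 = NumberOfOwnDescriptorsBits::update(bit_field3, 7);
      bit_field3 = IsDictionaryMapBit::update(bit_field3, true);
      assert(NumberOfOwnDescriptorsBits::decode(bit_field3) == 7);
      assert(IsDictionaryMapBit::decode(bit_field3));
      assert(EnumLengthBits::decode(bit_field3) == 0);  // neighbour untouched
    }
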
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index 4040d05bca..b9d7697fb5 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -199,29 +199,69 @@ void Module::SetStatus(Status new_status) {
set_status(new_status);
}
+void Module::ResetGraph(Handle<Module> module) {
+ DCHECK_NE(module->status(), kInstantiating);
+ DCHECK_NE(module->status(), kEvaluating);
+ if (module->status() != kPreInstantiating) return;
+ Isolate* isolate = module->GetIsolate();
+ Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
+ Reset(module);
+ for (int i = 0; i < requested_modules->length(); ++i) {
+ Handle<Object> descendant(requested_modules->get(i), isolate);
+ if (descendant->IsModule()) {
+ ResetGraph(Handle<Module>::cast(descendant));
+ } else {
+ DCHECK(descendant->IsUndefined(isolate));
+ }
+ }
+}
+
+void Module::Reset(Handle<Module> module) {
+ Isolate* isolate = module->GetIsolate();
+ Factory* factory = isolate->factory();
+
+ DCHECK(module->status() == kPreInstantiating ||
+ module->status() == kInstantiating);
+ DCHECK(module->exception()->IsTheHole(isolate));
+ DCHECK(module->import_meta()->IsTheHole(isolate));
+ // The namespace object cannot exist, because it would have been created
+ // by RunInitializationCode, which is called only after this module's SCC
+ // succeeds instantiation.
+ DCHECK(!module->module_namespace()->IsJSModuleNamespace());
+
+ Handle<ObjectHashTable> exports =
+ ObjectHashTable::New(isolate, module->info()->RegularExportCount());
+ Handle<FixedArray> regular_exports =
+ factory->NewFixedArray(module->regular_exports()->length());
+ Handle<FixedArray> regular_imports =
+ factory->NewFixedArray(module->regular_imports()->length());
+ Handle<FixedArray> requested_modules =
+ factory->NewFixedArray(module->requested_modules()->length());
+
+ if (module->status() == kInstantiating) {
+ module->set_code(JSFunction::cast(module->code())->shared());
+ }
+#ifdef DEBUG
+ module->PrintStatusTransition(kUninstantiated);
+#endif // DEBUG
+ module->set_status(kUninstantiated);
+ module->set_exports(*exports);
+ module->set_regular_exports(*regular_exports);
+ module->set_regular_imports(*regular_imports);
+ module->set_requested_modules(*requested_modules);
+ module->set_dfs_index(-1);
+ module->set_dfs_ancestor_index(-1);
+}
+
void Module::RecordError() {
DisallowHeapAllocation no_alloc;
-
Isolate* isolate = GetIsolate();
+
+ DCHECK(exception()->IsTheHole(isolate));
Object* the_exception = isolate->pending_exception();
DCHECK(!the_exception->IsTheHole(isolate));
- switch (status()) {
- case Module::kUninstantiated:
- case Module::kPreInstantiating:
- case Module::kInstantiating:
- case Module::kEvaluating:
- break;
- case Module::kErrored:
- DCHECK_EQ(exception(), the_exception);
- return;
- default:
- UNREACHABLE();
- }
-
set_code(info());
-
- DCHECK(exception()->IsTheHole(isolate));
#ifdef DEBUG
PrintStatusTransition(Module::kErrored);
#endif // DEBUG
@@ -232,9 +272,8 @@ void Module::RecordError() {
Object* Module::GetException() {
DisallowHeapAllocation no_alloc;
DCHECK_EQ(status(), Module::kErrored);
- Object* the_exception = exception();
- DCHECK(!the_exception->IsTheHole(GetIsolate()));
- return the_exception;
+ DCHECK(!exception()->IsTheHole(GetIsolate()));
+ return exception();
}
MaybeHandle<Cell> Module::ResolveImport(Handle<Module> module,
@@ -244,29 +283,25 @@ MaybeHandle<Cell> Module::ResolveImport(Handle<Module> module,
Isolate* isolate = module->GetIsolate();
Handle<Module> requested_module(
Module::cast(module->requested_modules()->get(module_request)), isolate);
- MaybeHandle<Cell> result = Module::ResolveExport(requested_module, name, loc,
- must_resolve, resolve_set);
- if (isolate->has_pending_exception()) {
- DCHECK(result.is_null());
- if (must_resolve) module->RecordError();
- // If {must_resolve} is false and there's an exception, then either that
- // exception was already recorded where it happened, or it's the
- // kAmbiguousExport exception (see ResolveExportUsingStarExports) and the
- // culprit module is still to be determined.
- }
+ Handle<String> specifier(
+ String::cast(module->info()->module_requests()->get(module_request)),
+ isolate);
+ MaybeHandle<Cell> result = Module::ResolveExport(
+ requested_module, specifier, name, loc, must_resolve, resolve_set);
+ DCHECK_IMPLIES(isolate->has_pending_exception(), result.is_null());
return result;
}
MaybeHandle<Cell> Module::ResolveExport(Handle<Module> module,
- Handle<String> name,
+ Handle<String> module_specifier,
+ Handle<String> export_name,
MessageLocation loc, bool must_resolve,
Module::ResolveSet* resolve_set) {
- DCHECK_NE(module->status(), kErrored);
- DCHECK_NE(module->status(), kEvaluating);
DCHECK_GE(module->status(), kPreInstantiating);
+ DCHECK_NE(module->status(), kEvaluating);
Isolate* isolate = module->GetIsolate();
- Handle<Object> object(module->exports()->Lookup(name), isolate);
+ Handle<Object> object(module->exports()->Lookup(export_name), isolate);
if (object->IsCell()) {
// Already resolved (e.g. because it's a local export).
return Handle<Cell>::cast(object);
@@ -282,17 +317,18 @@ MaybeHandle<Cell> Module::ResolveExport(Handle<Module> module,
Zone* zone = resolve_set->zone();
name_set =
new (zone->New(sizeof(UnorderedStringSet))) UnorderedStringSet(zone);
- } else if (name_set->count(name)) {
+ } else if (name_set->count(export_name)) {
// Cycle detected.
if (must_resolve) {
return isolate->Throw<Cell>(
isolate->factory()->NewSyntaxError(
- MessageTemplate::kCyclicModuleDependency, name),
+ MessageTemplate::kCyclicModuleDependency, export_name,
+ module_specifier),
&loc);
}
return MaybeHandle<Cell>();
}
- name_set->insert(name);
+ name_set->insert(export_name);
}
if (object->IsModuleInfoEntry()) {
@@ -313,23 +349,24 @@ MaybeHandle<Cell> Module::ResolveExport(Handle<Module> module,
// The export table may have changed but the entry in question should be
// unchanged.
Handle<ObjectHashTable> exports(module->exports(), isolate);
- DCHECK(exports->Lookup(name)->IsModuleInfoEntry());
+ DCHECK(exports->Lookup(export_name)->IsModuleInfoEntry());
- exports = ObjectHashTable::Put(exports, name, cell);
+ exports = ObjectHashTable::Put(exports, export_name, cell);
module->set_exports(*exports);
return cell;
}
DCHECK(object->IsTheHole(isolate));
- return Module::ResolveExportUsingStarExports(module, name, loc, must_resolve,
- resolve_set);
+ return Module::ResolveExportUsingStarExports(
+ module, module_specifier, export_name, loc, must_resolve, resolve_set);
}
MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
- Handle<Module> module, Handle<String> name, MessageLocation loc,
- bool must_resolve, Module::ResolveSet* resolve_set) {
+ Handle<Module> module, Handle<String> module_specifier,
+ Handle<String> export_name, MessageLocation loc, bool must_resolve,
+ Module::ResolveSet* resolve_set) {
Isolate* isolate = module->GetIsolate();
- if (!name->Equals(isolate->heap()->default_string())) {
+ if (!export_name->Equals(isolate->heap()->default_string())) {
// Go through all star exports looking for the given name. If multiple star
// exports provide the name, make sure they all map it to the same cell.
Handle<Cell> unique_cell;
@@ -346,15 +383,15 @@ MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
MessageLocation new_loc(script, entry->beg_pos(), entry->end_pos());
Handle<Cell> cell;
- if (ResolveImport(module, name, entry->module_request(), new_loc, false,
- resolve_set)
+ if (ResolveImport(module, export_name, entry->module_request(), new_loc,
+ false, resolve_set)
.ToHandle(&cell)) {
if (unique_cell.is_null()) unique_cell = cell;
if (*unique_cell != *cell) {
- return isolate->Throw<Cell>(
- isolate->factory()->NewSyntaxError(
- MessageTemplate::kAmbiguousExport, name),
- &loc);
+ return isolate->Throw<Cell>(isolate->factory()->NewSyntaxError(
+ MessageTemplate::kAmbiguousExport,
+ module_specifier, export_name),
+ &loc);
}
} else if (isolate->has_pending_exception()) {
return MaybeHandle<Cell>();
@@ -364,8 +401,8 @@ MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
if (!unique_cell.is_null()) {
// Found a unique star export for this name.
Handle<ObjectHashTable> exports(module->exports(), isolate);
- DCHECK(exports->Lookup(name)->IsTheHole(isolate));
- exports = ObjectHashTable::Put(exports, name, unique_cell);
+ DCHECK(exports->Lookup(export_name)->IsTheHole(isolate));
+ exports = ObjectHashTable::Put(exports, export_name, unique_cell);
module->set_exports(*exports);
return unique_cell;
}
@@ -373,9 +410,10 @@ MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
// Unresolvable.
if (must_resolve) {
- return isolate->Throw<Cell>(isolate->factory()->NewSyntaxError(
- MessageTemplate::kUnresolvableExport, name),
- &loc);
+ return isolate->Throw<Cell>(
+ isolate->factory()->NewSyntaxError(MessageTemplate::kUnresolvableExport,
+ module_specifier, export_name),
+ &loc);
}
return MaybeHandle<Cell>();
}
@@ -393,27 +431,24 @@ bool Module::Instantiate(Handle<Module> module, v8::Local<v8::Context> context,
}
#endif // DEBUG
- Isolate* isolate = module->GetIsolate();
- if (module->status() == kErrored) {
- isolate->Throw(module->GetException());
- return false;
- }
-
if (!PrepareInstantiate(module, context, callback)) {
+ ResetGraph(module);
return false;
}
+ Isolate* isolate = module->GetIsolate();
Zone zone(isolate->allocator(), ZONE_NAME);
ZoneForwardList<Handle<Module>> stack(&zone);
unsigned dfs_index = 0;
if (!FinishInstantiate(module, &stack, &dfs_index, &zone)) {
for (auto& descendant : stack) {
- descendant->RecordError();
+ Reset(descendant);
}
- DCHECK_EQ(module->GetException(), isolate->pending_exception());
+ DCHECK_EQ(module->status(), kUninstantiated);
return false;
}
- DCHECK(module->status() == kInstantiated || module->status() == kEvaluated);
+ DCHECK(module->status() == kInstantiated || module->status() == kEvaluated ||
+ module->status() == kErrored);
DCHECK(stack.empty());
return true;
}
@@ -421,7 +456,6 @@ bool Module::Instantiate(Handle<Module> module, v8::Local<v8::Context> context,
bool Module::PrepareInstantiate(Handle<Module> module,
v8::Local<v8::Context> context,
v8::Module::ResolveCallback callback) {
- DCHECK_NE(module->status(), kErrored);
DCHECK_NE(module->status(), kEvaluating);
DCHECK_NE(module->status(), kInstantiating);
if (module->status() >= kPreInstantiating) return true;
@@ -439,17 +473,9 @@ bool Module::PrepareInstantiate(Handle<Module> module,
v8::Utils::ToLocal(module))
.ToLocal(&api_requested_module)) {
isolate->PromoteScheduledException();
- module->RecordError();
return false;
}
Handle<Module> requested_module = Utils::OpenHandle(*api_requested_module);
- if (requested_module->status() == kErrored) {
- // TODO(neis): Move this into callback?
- isolate->Throw(requested_module->GetException());
- module->RecordError();
- DCHECK_EQ(module->GetException(), requested_module->GetException());
- return false;
- }
requested_modules->set(i, *requested_module);
}
@@ -458,8 +484,6 @@ bool Module::PrepareInstantiate(Handle<Module> module,
Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
isolate);
if (!PrepareInstantiate(requested_module, context, callback)) {
- module->RecordError();
- DCHECK_EQ(module->GetException(), requested_module->GetException());
return false;
}
}
@@ -531,7 +555,6 @@ void Module::MaybeTransitionComponent(Handle<Module> module,
bool Module::FinishInstantiate(Handle<Module> module,
ZoneForwardList<Handle<Module>>* stack,
unsigned* dfs_index, Zone* zone) {
- DCHECK_NE(module->status(), kErrored);
DCHECK_NE(module->status(), kEvaluating);
if (module->status() >= kInstantiating) return true;
DCHECK_EQ(module->status(), kPreInstantiating);
@@ -560,7 +583,6 @@ bool Module::FinishInstantiate(Handle<Module> module,
return false;
}
- DCHECK_NE(requested_module->status(), kErrored);
DCHECK_NE(requested_module->status(), kEvaluating);
DCHECK_GE(requested_module->status(), kInstantiating);
SLOW_DCHECK(
@@ -606,8 +628,8 @@ bool Module::FinishInstantiate(Handle<Module> module,
if (name->IsUndefined(isolate)) continue; // Star export.
MessageLocation loc(script, entry->beg_pos(), entry->end_pos());
ResolveSet resolve_set(zone);
- if (ResolveExport(module, Handle<String>::cast(name), loc, true,
- &resolve_set)
+ if (ResolveExport(module, Handle<String>(), Handle<String>::cast(name), loc,
+ true, &resolve_set)
.is_null()) {
return false;
}
@@ -722,7 +744,6 @@ namespace {
void FetchStarExports(Handle<Module> module, Zone* zone,
UnorderedModuleSet* visited) {
- DCHECK_NE(module->status(), Module::kErrored);
DCHECK_GE(module->status(), Module::kInstantiating);
if (module->module_namespace()->IsJSModuleNamespace()) return; // Shortcut.
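
The module.cc changes stop turning instantiation failures into a sticky kErrored state: Reset puts a single module back to kUninstantiated, and ResetGraph walks the requested-module graph, resetting every descendant that is still kPreInstantiating, so a later Instantiate can retry cleanly. Below is a standalone sketch of that recursive reset over a toy module graph; the structs are stand-ins, not V8's handles.

    #include <cassert>
    #include <vector>

    // Standalone sketch of the Reset/ResetGraph scheme from the hunk above.
    enum class Status { kUninstantiated, kPreInstantiating, kInstantiating, kInstantiated };

    struct ModuleLike {
      Status status = Status::kUninstantiated;
      std::vector<ModuleLike*> requested_modules;  // already-resolved dependencies
    };

    // Like Module::Reset: drop per-instantiation state, back to the start state.
    void Reset(ModuleLike* m) {
      assert(m->status == Status::kPreInstantiating ||
             m->status == Status::kInstantiating);
      m->status = Status::kUninstantiated;
    }

    // Like Module::ResetGraph: only modules still in kPreInstantiating are reset
    // transitively; anything further along was already handled via the DFS stack.
    void ResetGraph(ModuleLike* m) {
      if (m->status != Status::kPreInstantiating) return;
      Reset(m);
      for (ModuleLike* dep : m->requested_modules) ResetGraph(dep);
    }

    int main() {
      ModuleLike a, b, c;
      a.requested_modules = {&b, &c};
      a.status = b.status = Status::kPreInstantiating;
      c.status = Status::kInstantiated;   // instantiated earlier, must stay intact
      ResetGraph(&a);                     // e.g. PrepareInstantiate() failed
      assert(a.status == Status::kUninstantiated);
      assert(b.status == Status::kUninstantiated);
      assert(c.status == Status::kInstantiated);
    }
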
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index 7680f55313..fe374d3fc6 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_MODULE_H_
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -153,15 +154,17 @@ class Module : public Struct {
// exception (so check manually!).
class ResolveSet;
static MUST_USE_RESULT MaybeHandle<Cell> ResolveExport(
- Handle<Module> module, Handle<String> name, MessageLocation loc,
- bool must_resolve, ResolveSet* resolve_set);
+ Handle<Module> module, Handle<String> module_specifier,
+ Handle<String> export_name, MessageLocation loc, bool must_resolve,
+ ResolveSet* resolve_set);
static MUST_USE_RESULT MaybeHandle<Cell> ResolveImport(
Handle<Module> module, Handle<String> name, int module_request,
MessageLocation loc, bool must_resolve, ResolveSet* resolve_set);
static MUST_USE_RESULT MaybeHandle<Cell> ResolveExportUsingStarExports(
- Handle<Module> module, Handle<String> name, MessageLocation loc,
- bool must_resolve, ResolveSet* resolve_set);
+ Handle<Module> module, Handle<String> module_specifier,
+ Handle<String> export_name, MessageLocation loc, bool must_resolve,
+ ResolveSet* resolve_set);
static MUST_USE_RESULT bool PrepareInstantiate(
Handle<Module> module, v8::Local<v8::Context> context,
@@ -179,6 +182,11 @@ class Module : public Struct {
ZoneForwardList<Handle<Module>>* stack,
Status new_status);
+ // Set module's status back to kUninstantiated and reset other internal state.
+ // This is used when instantiation fails.
+ static void Reset(Handle<Module> module);
+ static void ResetGraph(Handle<Module> module);
+
// To set status to kErrored, RecordError should be used.
void SetStatus(Status status);
void RecordError();
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index 5d367d351f..604942a272 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -54,8 +54,9 @@
#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
set_condition) \
type* holder::name() const { \
+ type* value = type::cast(READ_FIELD(this, offset)); \
DCHECK(get_condition); \
- return type::cast(READ_FIELD(this, offset)); \
+ return value; \
} \
void holder::set_##name(type* value, WriteBarrierMode mode) { \
DCHECK(set_condition); \
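
The object-macros.h tweak reorders the getter that ACCESSORS_CHECKED2 generates so the field is read before the DCHECK runs; one consequence of the new shape is that a get-condition may refer to the freshly read value. A compile-only sketch of the resulting accessor shape, with an invented macro name and made-up types:

    #include <cassert>

    // Sketch of the reordered checked accessor: read first, then check, then
    // return. |raw_field| stands in for READ_FIELD; nothing here is V8's code.
    struct Payload {
      bool looks_valid() const { return true; }
    };

    #define SKETCH_ACCESSORS_CHECKED(holder, name, type, get_condition) \
      type* holder::name() const {                                      \
        type* value = raw_##name; /* stands in for READ_FIELD */        \
        assert(get_condition);    /* may mention |value| */             \
        return value;                                                   \
      }

    struct HolderLike {
      Payload* raw_field = nullptr;
      Payload* field() const;
    };

    SKETCH_ACCESSORS_CHECKED(HolderLike, field, Payload, value->looks_valid())

    int main() {
      Payload p;
      HolderLike h;
      h.raw_field = &p;
      assert(h.field() == &p);
    }
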
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index b03b1e831e..3a8459a204 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -7,6 +7,7 @@
#include "src/globals.h"
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
#include "src/utils.h"
// Has to be the last include (doesn't have include guards):
@@ -306,12 +307,14 @@ class ScopeInfo : public FixedArray {
class HasSimpleParametersField
: public BitField<bool, AsmModuleField::kNext, 1> {};
class FunctionKindField
- : public BitField<FunctionKind, HasSimpleParametersField::kNext, 10> {};
+ : public BitField<FunctionKind, HasSimpleParametersField::kNext, 11> {};
class HasOuterScopeInfoField
: public BitField<bool, FunctionKindField::kNext, 1> {};
class IsDebugEvaluateScopeField
: public BitField<bool, HasOuterScopeInfoField::kNext, 1> {};
+ STATIC_ASSERT(kLastFunctionKind <= FunctionKindField::kMax);
+
// Properties of variables.
class VariableModeField : public BitField<VariableMode, 0, 3> {};
class InitFlagField : public BitField<InitializationFlag, 3, 1> {};
diff --git a/deps/v8/src/objects/script-inl.h b/deps/v8/src/objects/script-inl.h
index 2544b4e20e..c5bd407628 100644
--- a/deps/v8/src/objects/script-inl.h
+++ b/deps/v8/src/objects/script-inl.h
@@ -26,7 +26,8 @@ ACCESSORS(Script, context_data, Object, kContextOffset)
ACCESSORS(Script, wrapper, HeapObject, kWrapperOffset)
SMI_ACCESSORS(Script, type, kTypeOffset)
ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
-ACCESSORS_CHECKED(Script, eval_from_shared, Object, kEvalFromSharedOffset,
+ACCESSORS_CHECKED(Script, eval_from_shared_or_wrapped_arguments, Object,
+ kEvalFromSharedOrWrappedArgumentsOffset,
this->type() != TYPE_WASM)
SMI_ACCESSORS_CHECKED(Script, eval_from_position, kEvalFromPositionOffset,
this->type() != TYPE_WASM)
@@ -35,9 +36,39 @@ SMI_ACCESSORS(Script, flags, kFlagsOffset)
ACCESSORS(Script, source_url, Object, kSourceUrlOffset)
ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset)
ACCESSORS(Script, host_defined_options, FixedArray, kHostDefinedOptionsOffset)
-ACCESSORS_CHECKED(Script, wasm_compiled_module, Object, kEvalFromSharedOffset,
+ACCESSORS_CHECKED(Script, wasm_compiled_module, Object,
+ kEvalFromSharedOrWrappedArgumentsOffset,
this->type() == TYPE_WASM)
+bool Script::is_wrapped() const {
+ return eval_from_shared_or_wrapped_arguments()->IsFixedArray();
+}
+
+bool Script::has_eval_from_shared() const {
+ return eval_from_shared_or_wrapped_arguments()->IsSharedFunctionInfo();
+}
+
+void Script::set_eval_from_shared(SharedFunctionInfo* shared,
+ WriteBarrierMode mode) {
+ DCHECK(!is_wrapped());
+ set_eval_from_shared_or_wrapped_arguments(shared, mode);
+}
+
+SharedFunctionInfo* Script::eval_from_shared() const {
+ DCHECK(has_eval_from_shared());
+ return SharedFunctionInfo::cast(eval_from_shared_or_wrapped_arguments());
+}
+
+void Script::set_wrapped_arguments(FixedArray* value, WriteBarrierMode mode) {
+ DCHECK(!has_eval_from_shared());
+ set_eval_from_shared_or_wrapped_arguments(value, mode);
+}
+
+FixedArray* Script::wrapped_arguments() const {
+ DCHECK(is_wrapped());
+ return FixedArray::cast(eval_from_shared_or_wrapped_arguments());
+}
+
Script::CompilationType Script::compilation_type() {
return BooleanBit::get(flags(), kCompilationTypeBit) ? COMPILATION_TYPE_EVAL
: COMPILATION_TYPE_HOST;
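
script-inl.h now keeps either the eval site's SharedFunctionInfo or a wrapped script's argument list in the single eval_from_shared_or_wrapped_arguments slot, and tells the two apart purely by the type of the stored object (is_wrapped iff a FixedArray is present). A standalone sketch of the same one-slot, two-meanings pattern; the *Like classes are placeholders, not V8 types:

    #include <cassert>

    // Standalone sketch of the single-slot overload in script-inl.h: the slot
    // holds either the eval site's function info or a wrapped script's argument
    // list, and the accessors decide by checking the stored object's type.
    struct HeapObjectLike { virtual ~HeapObjectLike() = default; };
    struct SharedInfoLike : HeapObjectLike {};
    struct ArgumentListLike : HeapObjectLike {};

    struct ScriptLike {
      HeapObjectLike* eval_from_shared_or_wrapped_arguments = nullptr;

      bool is_wrapped() const {
        return dynamic_cast<ArgumentListLike*>(
                   eval_from_shared_or_wrapped_arguments) != nullptr;
      }
      bool has_eval_from_shared() const {
        return dynamic_cast<SharedInfoLike*>(
                   eval_from_shared_or_wrapped_arguments) != nullptr;
      }
      void set_wrapped_arguments(ArgumentListLike* args) {
        assert(!has_eval_from_shared());  // the two uses are mutually exclusive
        eval_from_shared_or_wrapped_arguments = args;
      }
      void set_eval_from_shared(SharedInfoLike* shared) {
        assert(!is_wrapped());
        eval_from_shared_or_wrapped_arguments = shared;
      }
    };

    int main() {
      ScriptLike script;
      ArgumentListLike args;
      script.set_wrapped_arguments(&args);
      assert(script.is_wrapped() && !script.has_eval_from_shared());
    }
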
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index ae4a87914d..4d84be2262 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_SCRIPT_H_
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -62,9 +63,21 @@ class Script : public Struct {
// [line_ends]: FixedArray of line ends positions.
DECL_ACCESSORS(line_ends, Object)
+ DECL_ACCESSORS(eval_from_shared_or_wrapped_arguments, Object)
+
// [eval_from_shared]: for eval scripts the shared function info for the
// function from which eval was called.
- DECL_ACCESSORS(eval_from_shared, Object)
+ DECL_ACCESSORS(eval_from_shared, SharedFunctionInfo)
+
+ // [wrapped_arguments]: for the list of arguments in a wrapped script.
+ DECL_ACCESSORS(wrapped_arguments, FixedArray)
+
+ // Whether the script is implicitly wrapped in a function.
+ inline bool is_wrapped() const;
+
+ // Whether the eval_from_shared field is set with a shared function info
+ // for the eval site.
+ inline bool has_eval_from_shared() const;
// [eval_from_position]: the source position in the code for the function
// from which eval was called, as positive integer. Or the code offset in the
@@ -118,6 +131,9 @@ class Script : public Struct {
// Retrieve source position from where eval was called.
int GetEvalPosition();
+ // Check if the script contains any Asm modules.
+ bool ContainsAsmModule();
+
// Init line_ends array with source code positions of line ends.
static void InitLineEnds(Handle<Script> script);
@@ -186,9 +202,10 @@ class Script : public Struct {
static const int kTypeOffset = kWrapperOffset + kPointerSize;
static const int kLineEndsOffset = kTypeOffset + kPointerSize;
static const int kIdOffset = kLineEndsOffset + kPointerSize;
- static const int kEvalFromSharedOffset = kIdOffset + kPointerSize;
+ static const int kEvalFromSharedOrWrappedArgumentsOffset =
+ kIdOffset + kPointerSize;
static const int kEvalFromPositionOffset =
- kEvalFromSharedOffset + kPointerSize;
+ kEvalFromSharedOrWrappedArgumentsOffset + kPointerSize;
static const int kSharedFunctionInfosOffset =
kEvalFromPositionOffset + kPointerSize;
static const int kFlagsOffset = kSharedFunctionInfosOffset + kPointerSize;
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 0c35933950..57a72754b5 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -80,6 +80,8 @@ AbstractCode* SharedFunctionInfo::abstract_code() {
}
}
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, is_wrapped,
+ SharedFunctionInfo::IsWrappedBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, allows_lazy_compilation,
SharedFunctionInfo::AllowLazyCompilationBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints,
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index a43c2a12b7..8e996042c0 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -254,7 +254,7 @@ class SharedFunctionInfo : public HeapObject {
String* DebugName();
// The function cannot cause any side effects.
- bool HasNoSideEffect();
+ static bool HasNoSideEffect(Handle<SharedFunctionInfo> info);
// Used for flags such as --turbo-filter.
bool PassesFilter(const char* raw_filter);
@@ -288,6 +288,9 @@ class SharedFunctionInfo : public HeapObject {
inline LanguageMode language_mode();
inline void set_language_mode(LanguageMode language_mode);
+ // Indicates whether the source is implicitly wrapped in a function.
+ DECL_BOOLEAN_ACCESSORS(is_wrapped)
+
// True if the function has any duplicated parameter names.
DECL_BOOLEAN_ACCESSORS(has_duplicate_parameters)
@@ -336,8 +339,8 @@ class SharedFunctionInfo : public HeapObject {
// [source code]: Source code for the function.
bool HasSourceCode() const;
- Handle<Object> GetSourceCode();
- Handle<Object> GetSourceCodeHarmony();
+ static Handle<Object> GetSourceCode(Handle<SharedFunctionInfo> shared);
+ static Handle<Object> GetSourceCodeHarmony(Handle<SharedFunctionInfo> shared);
// Tells whether this function should be subject to debugging.
inline bool IsSubjectToDebugging();
@@ -465,22 +468,25 @@ class SharedFunctionInfo : public HeapObject {
#define COMPILER_HINTS_BIT_FIELDS(V, _) \
V(IsNativeBit, bool, 1, _) \
V(IsStrictBit, bool, 1, _) \
- V(FunctionKindBits, FunctionKind, 10, _) \
+ V(IsWrappedBit, bool, 1, _) \
+ V(FunctionKindBits, FunctionKind, 11, _) \
V(HasDuplicateParametersBit, bool, 1, _) \
V(AllowLazyCompilationBit, bool, 1, _) \
V(NeedsHomeObjectBit, bool, 1, _) \
V(IsDeclarationBit, bool, 1, _) \
V(IsAsmWasmBrokenBit, bool, 1, _) \
V(FunctionMapIndexBits, int, 5, _) \
- V(DisabledOptimizationReasonBits, BailoutReason, 7, _) \
+ V(DisabledOptimizationReasonBits, BailoutReason, 4, _) \
V(RequiresInstanceFieldsInitializer, bool, 1, _)
DEFINE_BIT_FIELDS(COMPILER_HINTS_BIT_FIELDS)
#undef COMPILER_HINTS_BIT_FIELDS
// Bailout reasons must fit in the DisabledOptimizationReason bitfield.
- STATIC_ASSERT(kLastErrorMessage <= DisabledOptimizationReasonBits::kMax);
+ STATIC_ASSERT(BailoutReason::kLastErrorMessage <=
+ DisabledOptimizationReasonBits::kMax);
+ STATIC_ASSERT(kLastFunctionKind <= FunctionKindBits::kMax);
// Masks for checking if certain FunctionKind bits are set without fully
// decoding of the FunctionKind bit field.
static const int kClassConstructorMask = FunctionKind::kClassConstructor
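
In the compiler-hints hunk, room for IsWrappedBit and the wider FunctionKindBits comes from shrinking DisabledOptimizationReasonBits from 7 to 4 bits, and STATIC_ASSERTs pin both enums to their new field widths. A compile-only sketch of that rebalance-and-assert pattern; the enum values below are illustrative, not V8's real counts:

    #include <cstdint>

    // Compile-only sketch: when one packed field grows, another must shrink,
    // and static_asserts keep the enum ranges honest.
    enum class BailoutReasonLike : uint8_t { kNoReason = 0, kLastErrorMessage = 15 };
    enum class FunctionKindLike : uint16_t { kNormalFunction = 0, kLastFunctionKind = 2000 };

    constexpr int kFunctionKindBits = 11;       // was 10 before this change
    constexpr int kDisabledOptReasonBits = 4;   // was 7 before this change

    static_assert(static_cast<int>(BailoutReasonLike::kLastErrorMessage) <
                      (1 << kDisabledOptReasonBits),
                  "every remaining bailout reason must still be encodable");
    static_assert(static_cast<int>(FunctionKindLike::kLastFunctionKind) <
                      (1 << kFunctionKindBits),
                  "the widened FunctionKind field must cover the last kind");

    int main() {}
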
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index dd75210a54..9b64444de2 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -525,11 +525,42 @@ void ConsString::set_second(String* value, WriteBarrierMode mode) {
ACCESSORS(ThinString, actual, String, kActualOffset);
+HeapObject* ThinString::unchecked_actual() const {
+ return reinterpret_cast<HeapObject*>(READ_FIELD(this, kActualOffset));
+}
+
bool ExternalString::is_short() {
InstanceType type = map()->instance_type();
return (type & kShortExternalStringMask) == kShortExternalStringTag;
}
+Address ExternalString::resource_as_address() {
+ return *reinterpret_cast<Address*>(FIELD_ADDR(this, kResourceOffset));
+}
+
+void ExternalString::set_address_as_resource(Address address) {
+ DCHECK(IsAligned(reinterpret_cast<intptr_t>(address), kPointerSize));
+ *reinterpret_cast<Address*>(FIELD_ADDR(this, kResourceOffset)) = address;
+ if (IsExternalOneByteString()) {
+ ExternalOneByteString::cast(this)->update_data_cache();
+ } else {
+ ExternalTwoByteString::cast(this)->update_data_cache();
+ }
+}
+
+uint32_t ExternalString::resource_as_uint32() {
+ return static_cast<uint32_t>(
+ *reinterpret_cast<uintptr_t*>(FIELD_ADDR(this, kResourceOffset)));
+}
+
+void ExternalString::set_uint32_as_resource(uint32_t value) {
+ *reinterpret_cast<uintptr_t*>(FIELD_ADDR(this, kResourceOffset)) = value;
+ if (is_short()) return;
+ const char** data_field =
+ reinterpret_cast<const char**>(FIELD_ADDR(this, kResourceDataOffset));
+ *data_field = nullptr;
+}
+
const ExternalOneByteString::Resource* ExternalOneByteString::resource() {
return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
}
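
The new ExternalString helpers let the serializer and deserializer reinterpret the resource pointer slot as either a raw address or a 32-bit reference, clearing the cached data pointer while a non-pointer value is stored (the short-string special case is skipped here). A standalone sketch of that slot reuse with plain C++ types, not V8's:

    #include <cassert>
    #include <cstdint>

    // Sketch of the ExternalString helpers above: the pointer-sized resource
    // slot holds the real resource address or, during (de)serialization, a
    // small 32-bit reference.
    struct ExternalStringLike {
      uintptr_t resource_slot = 0;        // normally the ExternalStringResource*
      const char* cached_data = nullptr;  // cached resource data pointer

      uintptr_t resource_as_address() const { return resource_slot; }

      void set_address_as_resource(uintptr_t address) {
        assert(address % sizeof(void*) == 0);  // real pointers are aligned
        resource_slot = address;
        // V8 re-derives |cached_data| from the resource here (update_data_cache).
      }

      uint32_t resource_as_uint32() const {
        return static_cast<uint32_t>(resource_slot);
      }

      void set_uint32_as_resource(uint32_t value) {
        resource_slot = value;
        cached_data = nullptr;  // the cache means nothing while a reference is stored
      }
    };

    int main() {
      ExternalStringLike s;
      s.set_uint32_as_resource(42);       // serializer stashes a reference id
      assert(s.resource_as_uint32() == 42);
      s.set_address_as_resource(reinterpret_cast<uintptr_t>(&s));  // restore a pointer
      assert(s.resource_as_address() == reinterpret_cast<uintptr_t>(&s));
    }
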
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index f21171d62f..066fc6d879 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -631,6 +631,7 @@ class ThinString : public String {
public:
// Actual string that this ThinString refers to.
inline String* actual() const;
+ inline HeapObject* unchecked_actual() const;
inline void set_actual(String* s,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
@@ -718,6 +719,12 @@ class ExternalString : public String {
// Return whether external string is short (data pointer is not cached).
inline bool is_short();
+ // Used in the serializer/deserializer.
+ inline Address resource_as_address();
+ inline void set_address_as_resource(Address address);
+ inline uint32_t resource_as_uint32();
+ inline void set_uint32_as_resource(uint32_t value);
+
STATIC_ASSERT(kResourceOffset == Internals::kStringResourceOffset);
private:
diff --git a/deps/v8/src/ostreams.cc b/deps/v8/src/ostreams.cc
index 5c7b1631a2..66b57020ad 100644
--- a/deps/v8/src/ostreams.cc
+++ b/deps/v8/src/ostreams.cc
@@ -50,14 +50,14 @@ OFStream::~OFStream() {}
namespace {
// Locale-independent predicates.
-bool IsPrint(uint16_t c) { return 0x20 <= c && c <= 0x7e; }
-bool IsSpace(uint16_t c) { return (0x9 <= c && c <= 0xd) || c == 0x20; }
+bool IsPrint(uint16_t c) { return 0x20 <= c && c <= 0x7E; }
+bool IsSpace(uint16_t c) { return (0x9 <= c && c <= 0xD) || c == 0x20; }
bool IsOK(uint16_t c) { return (IsPrint(c) || IsSpace(c)) && c != '\\'; }
std::ostream& PrintUC16(std::ostream& os, uint16_t c, bool (*pred)(uint16_t)) {
char buf[10];
- const char* format = pred(c) ? "%c" : (c <= 0xff) ? "\\x%02x" : "\\u%04x";
+ const char* format = pred(c) ? "%c" : (c <= 0xFF) ? "\\x%02x" : "\\u%04x";
snprintf(buf, sizeof(buf), format, c);
return os << buf;
}
@@ -124,7 +124,7 @@ std::ostream& operator<<(std::ostream& os, const AsHexBytes& hex) {
if (b) os << " ";
uint8_t printed_byte =
hex.byte_order == AsHexBytes::kLittleEndian ? b : bytes - b - 1;
- os << AsHex((hex.value >> (8 * printed_byte)) & 0xff, 2);
+ os << AsHex((hex.value >> (8 * printed_byte)) & 0xFF, 2);
}
return os;
}
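
The ostreams.cc hunk only upper-cases the hex literals; the locale-independent logic is unchanged: printable or whitespace code units are emitted verbatim, everything else as a \xNN or \uNNNN escape. A self-contained version of that formatter, transcribed from the context shown above:

    #include <cstdint>
    #include <cstdio>

    // Standalone version of the PrintUC16 logic touched above: print a UTF-16
    // code unit literally when it is a "safe" printable character, otherwise
    // as a \xNN or \uNNNN escape, independent of the current locale.
    bool IsPrint(uint16_t c) { return 0x20 <= c && c <= 0x7E; }
    bool IsSpace(uint16_t c) { return (0x9 <= c && c <= 0xD) || c == 0x20; }
    bool IsOK(uint16_t c) { return (IsPrint(c) || IsSpace(c)) && c != '\\'; }

    void PrintUC16(uint16_t c) {
      char buf[10];
      const char* format = IsOK(c) ? "%c" : (c <= 0xFF) ? "\\x%02x" : "\\u%04x";
      std::snprintf(buf, sizeof(buf), format, c);
      std::fputs(buf, stdout);
    }

    int main() {
      PrintUC16('A');     // A
      PrintUC16(0x07);    // \x07
      PrintUC16(0x2603);  // \u2603
      std::putchar('\n');
    }
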
diff --git a/deps/v8/src/parsing/background-parsing-task.cc b/deps/v8/src/parsing/background-parsing-task.cc
index 387cd3a1c6..cb811566df 100644
--- a/deps/v8/src/parsing/background-parsing-task.cc
+++ b/deps/v8/src/parsing/background-parsing-task.cc
@@ -31,7 +31,8 @@ BackgroundParsingTask::BackgroundParsingTask(
DCHECK(options == ScriptCompiler::kProduceParserCache ||
options == ScriptCompiler::kProduceCodeCache ||
options == ScriptCompiler::kProduceFullCodeCache ||
- options == ScriptCompiler::kNoCompileOptions);
+ options == ScriptCompiler::kNoCompileOptions ||
+ options == ScriptCompiler::kEagerCompile);
VMState<PARSER> state(isolate);
diff --git a/deps/v8/src/parsing/expression-classifier.h b/deps/v8/src/parsing/expression-classifier.h
index 6c6c813b3e..709d5736b5 100644
--- a/deps/v8/src/parsing/expression-classifier.h
+++ b/deps/v8/src/parsing/expression-classifier.h
@@ -97,14 +97,12 @@ class ExpressionClassifier {
: base_(base),
previous_(base->classifier_),
zone_(base->impl()->zone()),
- non_patterns_to_rewrite_(base->impl()->GetNonPatternList()),
reported_errors_(base->impl()->GetReportedErrorList()),
duplicate_finder_(duplicate_finder),
invalid_productions_(0),
function_properties_(0) {
base->classifier_ = this;
reported_errors_begin_ = reported_errors_end_ = reported_errors_->length();
- non_pattern_begin_ = non_patterns_to_rewrite_->length();
}
V8_INLINE ~ExpressionClassifier() {
@@ -291,19 +289,10 @@ class ExpressionClassifier {
Add(Error(loc, message, kLetPatternProduction, arg));
}
- void Accumulate(ExpressionClassifier* inner, unsigned productions,
- bool merge_non_patterns = true) {
+ void Accumulate(ExpressionClassifier* inner, unsigned productions) {
DCHECK_EQ(inner->reported_errors_, reported_errors_);
DCHECK_EQ(inner->reported_errors_begin_, reported_errors_end_);
DCHECK_EQ(inner->reported_errors_end_, reported_errors_->length());
- DCHECK_EQ(inner->non_patterns_to_rewrite_, non_patterns_to_rewrite_);
- DCHECK_LE(non_pattern_begin_, inner->non_pattern_begin_);
- DCHECK_LE(inner->non_pattern_begin_, non_patterns_to_rewrite_->length());
- // Merge non-patterns from the inner classifier, or discard them.
- if (merge_non_patterns)
- inner->non_pattern_begin_ = non_patterns_to_rewrite_->length();
- else
- non_patterns_to_rewrite_->Rewind(inner->non_pattern_begin_);
// Propagate errors from inner, but don't overwrite already recorded
// errors.
unsigned non_arrow_inner_invalid_productions =
@@ -368,16 +357,12 @@ class ExpressionClassifier {
reported_errors_end_;
}
- V8_INLINE int GetNonPatternBegin() const { return non_pattern_begin_; }
-
V8_INLINE void Discard() {
if (reported_errors_end_ == reported_errors_->length()) {
reported_errors_->Rewind(reported_errors_begin_);
reported_errors_end_ = reported_errors_begin_;
}
DCHECK_EQ(reported_errors_begin_, reported_errors_end_);
- DCHECK_LE(non_pattern_begin_, non_patterns_to_rewrite_->length());
- non_patterns_to_rewrite_->Rewind(non_pattern_begin_);
}
ExpressionClassifier* previous() const { return previous_; }
@@ -424,16 +409,8 @@ class ExpressionClassifier {
typename Types::Base* base_;
ExpressionClassifier* previous_;
Zone* zone_;
- ZoneList<typename Types::RewritableExpression>* non_patterns_to_rewrite_;
ZoneList<Error>* reported_errors_;
DuplicateFinder* duplicate_finder_;
- // The uint16_t for non_pattern_begin_ will not be enough in the case,
- // e.g., of an array literal containing more than 64K inner array
- // literals with spreads, as in:
- // var N=65536; eval("var x=[];" + "[" + "[...x],".repeat(N) + "].length");
- // An implementation limit error in ParserBase::AddNonPatternForRewriting
- // will be triggered in this case.
- uint16_t non_pattern_begin_;
unsigned invalid_productions_ : 14;
unsigned function_properties_ : 2;
// The uint16_t for reported_errors_begin_ and reported_errors_end_ will
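
With non_patterns_to_rewrite_ gone, the classifier only manages the shared reported-errors list: each nested classifier remembers where its errors begin and Discard rewinds them; Accumulate, which filters by production and leaves the surviving errors in place for the outer classifier, is omitted from this sketch. A minimal standalone illustration of just that rewind bookkeeping:

    #include <cassert>
    #include <string>
    #include <vector>

    // Minimal sketch of the remaining error bookkeeping: a nested classifier's
    // errors live at the tail of one shared list and are either left for the
    // outer classifier or rewound away.
    struct ClassifierLike {
      std::vector<std::string>* errors;
      size_t begin;

      explicit ClassifierLike(std::vector<std::string>* shared)
          : errors(shared), begin(shared->size()) {}

      void Add(const std::string& e) { errors->push_back(e); }

      // Like Discard(): drop everything this classifier recorded.
      void Discard() { errors->resize(begin); }
    };

    int main() {
      std::vector<std::string> shared;
      ClassifierLike outer(&shared);
      outer.Add("outer error");
      {
        ClassifierLike inner(&shared);
        inner.Add("inner error");
        inner.Discard();           // the production turned out to be valid
      }
      assert(shared.size() == 1);  // only the outer error survives
    }
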
diff --git a/deps/v8/src/parsing/expression-scope-reparenter.cc b/deps/v8/src/parsing/expression-scope-reparenter.cc
index 18c52add11..bdb0aeadd6 100644
--- a/deps/v8/src/parsing/expression-scope-reparenter.cc
+++ b/deps/v8/src/parsing/expression-scope-reparenter.cc
@@ -85,7 +85,7 @@ void Reparenter::VisitRewritableExpression(RewritableExpression* expr) {
}
void Reparenter::VisitBlock(Block* stmt) {
- if (stmt->scope() != nullptr)
+ if (stmt->scope())
stmt->scope()->ReplaceOuterScope(scope_);
else
VisitStatements(stmt->statements());
@@ -93,7 +93,11 @@ void Reparenter::VisitBlock(Block* stmt) {
void Reparenter::VisitTryCatchStatement(TryCatchStatement* stmt) {
Visit(stmt->try_block());
- stmt->scope()->ReplaceOuterScope(scope_);
+ if (stmt->scope()) {
+ stmt->scope()->ReplaceOuterScope(scope_);
+ } else {
+ Visit(stmt->catch_block());
+ }
}
void Reparenter::VisitWithStatement(WithStatement* stmt) {
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 1c9d648a1e..b8f191dd5a 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -45,7 +45,13 @@ ParseInfo::ParseInfo(Handle<SharedFunctionInfo> shared)
Isolate* isolate = shared->GetIsolate();
InitFromIsolate(isolate);
+ // Do not support re-parsing top-level function of a wrapped script.
+ // TODO(yangguo): consider whether we need a top-level function in a
+ // wrapped script at all.
+ DCHECK_IMPLIES(is_toplevel(), !Script::cast(shared->script())->is_wrapped());
+
set_toplevel(shared->is_toplevel());
+ set_wrapped_as_function(shared->is_wrapped());
set_allow_lazy_parsing(FLAG_lazy_inner_functions);
set_is_named_expression(shared->is_named_expression());
set_compiler_hints(shared->compiler_hints());
@@ -54,8 +60,6 @@ ParseInfo::ParseInfo(Handle<SharedFunctionInfo> shared)
function_literal_id_ = shared->function_literal_id();
set_language_mode(shared->language_mode());
set_asm_wasm_broken(shared->is_asm_wasm_broken());
- set_requires_instance_fields_initializer(
- shared->requires_instance_fields_initializer());
Handle<Script> script(Script::cast(shared->script()));
set_script(script);
@@ -90,6 +94,7 @@ ParseInfo::ParseInfo(Handle<Script> script)
set_allow_lazy_parsing();
set_toplevel();
set_script(script);
+ set_wrapped_as_function(script->is_wrapped());
set_native(script->type() == Script::TYPE_NATIVE);
set_eval(script->compilation_type() == Script::COMPILATION_TYPE_EVAL);
@@ -151,6 +156,11 @@ FunctionKind ParseInfo::function_kind() const {
return SharedFunctionInfo::FunctionKindBits::decode(compiler_hints_);
}
+bool ParseInfo::requires_instance_fields_initializer() const {
+ return SharedFunctionInfo::RequiresInstanceFieldsInitializer::decode(
+ compiler_hints_);
+}
+
void ParseInfo::InitFromIsolate(Isolate* isolate) {
DCHECK_NOT_NULL(isolate);
set_hash_seed(isolate->heap()->HashSeed());
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 9deea1ecac..e93c7137ca 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -79,13 +79,12 @@ class V8_EXPORT_PRIVATE ParseInfo {
FLAG_ACCESSOR(kCollectTypeProfile, collect_type_profile,
set_collect_type_profile)
FLAG_ACCESSOR(kIsAsmWasmBroken, is_asm_wasm_broken, set_asm_wasm_broken)
- FLAG_ACCESSOR(kRequiresInstanceFieldsInitializer,
- requires_instance_fields_initializer,
- set_requires_instance_fields_initializer)
FLAG_ACCESSOR(kBlockCoverageEnabled, block_coverage_enabled,
set_block_coverage_enabled)
FLAG_ACCESSOR(kOnBackgroundThread, on_background_thread,
set_on_background_thread)
+ FLAG_ACCESSOR(kWrappedAsFunction, is_wrapped_as_function,
+ set_wrapped_as_function)
#undef FLAG_ACCESSOR
void set_parse_restriction(ParseRestriction restriction) {
@@ -208,6 +207,7 @@ class V8_EXPORT_PRIVATE ParseInfo {
// Getters for individual compiler hints.
bool is_declaration() const;
FunctionKind function_kind() const;
+ bool requires_instance_fields_initializer() const;
//--------------------------------------------------------------------------
// TODO(titzer): these should not be part of ParseInfo.
@@ -261,8 +261,8 @@ class V8_EXPORT_PRIVATE ParseInfo {
kCollectTypeProfile = 1 << 10,
kBlockCoverageEnabled = 1 << 11,
kIsAsmWasmBroken = 1 << 12,
- kRequiresInstanceFieldsInitializer = 1 << 13,
- kOnBackgroundThread = 1 << 14,
+ kOnBackgroundThread = 1 << 13,
+ kWrappedAsFunction = 1 << 14, // Implicitly wrapped as function.
};
//------------- Inputs to parsing and scope analysis -----------------------
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index c393bc5ec2..faefe44011 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -279,9 +279,11 @@ class ParserBase {
allow_harmony_do_expressions_(false),
allow_harmony_function_sent_(false),
allow_harmony_public_fields_(false),
+ allow_harmony_static_fields_(false),
allow_harmony_dynamic_import_(false),
allow_harmony_import_meta_(false),
- allow_harmony_async_iteration_(false) {}
+ allow_harmony_optional_catch_binding_(false),
+ allow_harmony_private_fields_(false) {}
#define ALLOW_ACCESSORS(name) \
bool allow_##name() const { return allow_##name##_; } \
@@ -291,9 +293,10 @@ class ParserBase {
ALLOW_ACCESSORS(harmony_do_expressions);
ALLOW_ACCESSORS(harmony_function_sent);
ALLOW_ACCESSORS(harmony_public_fields);
+ ALLOW_ACCESSORS(harmony_static_fields);
ALLOW_ACCESSORS(harmony_dynamic_import);
ALLOW_ACCESSORS(harmony_import_meta);
- ALLOW_ACCESSORS(harmony_async_iteration);
+ ALLOW_ACCESSORS(harmony_optional_catch_binding);
#undef ALLOW_ACCESSORS
@@ -304,6 +307,13 @@ class ParserBase {
scanner()->set_allow_harmony_bigint(allow);
}
+ bool allow_harmony_private_fields() const {
+ return scanner()->allow_harmony_private_fields();
+ }
+ void set_allow_harmony_private_fields(bool allow) {
+ scanner()->set_allow_harmony_private_fields(allow);
+ }
+
uintptr_t stack_limit() const { return stack_limit_; }
void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
@@ -383,18 +393,27 @@ class ParserBase {
void AddProperty() { expected_property_count_++; }
int expected_property_count() { return expected_property_count_; }
+ void DisableOptimization(BailoutReason reason) {
+ dont_optimize_reason_ = reason;
+ }
+ BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
+
FunctionKind kind() const { return scope()->function_kind(); }
- FunctionState* outer() const { return outer_function_state_; }
void RewindDestructuringAssignments(int pos) {
destructuring_assignments_to_rewrite_.Rewind(pos);
}
- void SetDestructuringAssignmentsScope(int pos, Scope* scope) {
- for (int i = pos; i < destructuring_assignments_to_rewrite_.length();
- ++i) {
- destructuring_assignments_to_rewrite_[i]->set_scope(scope);
+ void AdoptDestructuringAssignmentsFromParentState(int pos) {
+ const auto& outer_assignments =
+ outer_function_state_->destructuring_assignments_to_rewrite_;
+ DCHECK_GE(outer_assignments.length(), pos);
+ for (int i = pos; i < outer_assignments.length(); ++i) {
+ auto expr = outer_assignments[i];
+ expr->set_scope(scope_);
+ destructuring_assignments_to_rewrite_.Add(expr, scope_->zone());
}
+ outer_function_state_->RewindDestructuringAssignments(pos);
}
const ZoneList<RewritableExpressionT>&
@@ -451,14 +470,6 @@ class ParserBase {
destructuring_assignments_to_rewrite_.Add(expr, scope_->zone());
}
- void AddNonPatternForRewriting(RewritableExpressionT expr, bool* ok) {
- non_patterns_to_rewrite_.Add(expr, scope_->zone());
- if (non_patterns_to_rewrite_.length() >=
- std::numeric_limits<uint16_t>::max()) {
- *ok = false;
- }
- }
-
// Properties count estimation.
int expected_property_count_;
@@ -471,6 +482,9 @@ class ParserBase {
ZoneList<typename ExpressionClassifier::Error> reported_errors_;
+ // A reason, if any, why this function should not be optimized.
+ BailoutReason dont_optimize_reason_;
+
// Record whether the next (=== immediately following) function literal is
// preceded by a parenthesis / exclamation mark. Also record the previous
// state.
@@ -1078,10 +1092,8 @@ class ParserBase {
return ParsePrimaryExpression(&is_async, ok);
}
- // This method wraps the parsing of the expression inside a new expression
- // classifier and calls RewriteNonPattern if parsing is successful.
- // It should be used whenever we're parsing an expression that is known
- // to not be a pattern or part of a pattern.
+ // Use when parsing an expression that is known to not be a pattern or part
+ // of a pattern.
V8_INLINE ExpressionT ParseExpression(bool accept_IN, bool* ok);
// This method does not wrap the parsing of the expression inside a
@@ -1201,14 +1213,15 @@ class ParserBase {
// by value. The method is expected to add the parsed statements to the
// list. This works because in the case of the parser, StatementListT is
// a pointer whereas the preparser does not really modify the body.
- V8_INLINE void ParseStatementList(StatementListT body, int end_token,
+ V8_INLINE void ParseStatementList(StatementListT body, Token::Value end_token,
bool* ok) {
LazyParsingResult result = ParseStatementList(body, end_token, false, ok);
USE(result);
DCHECK_EQ(result, kLazyParsingComplete);
}
- LazyParsingResult ParseStatementList(StatementListT body, int end_token,
- bool may_abort, bool* ok);
+ LazyParsingResult ParseStatementList(StatementListT body,
+ Token::Value end_token, bool may_abort,
+ bool* ok);
StatementT ParseStatementListItem(bool* ok);
StatementT ParseStatement(ZoneList<const AstRawString*>* labels, bool* ok) {
return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
@@ -1463,21 +1476,18 @@ class ParserBase {
// Accumulates the classifier that is on top of the stack (inner) to
// the one that is right below (outer) and pops the inner.
- V8_INLINE void Accumulate(unsigned productions,
- bool merge_non_patterns = true) {
+ V8_INLINE void Accumulate(unsigned productions) {
DCHECK_NOT_NULL(classifier_);
ExpressionClassifier* previous = classifier_->previous();
DCHECK_NOT_NULL(previous);
- previous->Accumulate(classifier_, productions, merge_non_patterns);
+ previous->Accumulate(classifier_, productions);
classifier_ = previous;
}
V8_INLINE void AccumulateNonBindingPatternErrors() {
- static const bool kMergeNonPatterns = true;
this->Accumulate(ExpressionClassifier::AllProductions &
- ~(ExpressionClassifier::BindingPatternProduction |
- ExpressionClassifier::LetPatternProduction),
- kMergeNonPatterns);
+ ~(ExpressionClassifier::BindingPatternProduction |
+ ExpressionClassifier::LetPatternProduction));
}
// Pops and discards the classifier that is on top of the stack
@@ -1534,9 +1544,11 @@ class ParserBase {
bool allow_harmony_do_expressions_;
bool allow_harmony_function_sent_;
bool allow_harmony_public_fields_;
+ bool allow_harmony_static_fields_;
bool allow_harmony_dynamic_import_;
bool allow_harmony_import_meta_;
- bool allow_harmony_async_iteration_;
+ bool allow_harmony_optional_catch_binding_;
+ bool allow_harmony_private_fields_;
friend class DiscardableZoneScope;
};
@@ -1553,6 +1565,7 @@ ParserBase<Impl>::FunctionState::FunctionState(
destructuring_assignments_to_rewrite_(16, scope->zone()),
non_patterns_to_rewrite_(0, scope->zone()),
reported_errors_(16, scope->zone()),
+ dont_optimize_reason_(BailoutReason::kNoReason),
next_function_is_likely_called_(false),
previous_function_was_likely_called_(false),
contains_function_or_eval_(false) {
@@ -1587,6 +1600,7 @@ void ParserBase<Impl>::GetUnexpectedTokenMessage(
case Token::STRING:
*message = MessageTemplate::kUnexpectedTokenString;
break;
+ case Token::PRIVATE_NAME:
case Token::IDENTIFIER:
*message = MessageTemplate::kUnexpectedTokenIdentifier;
break;
@@ -1664,6 +1678,13 @@ ParserBase<Impl>::ParseAndClassifyIdentifier(bool* ok) {
if (next == Token::IDENTIFIER || next == Token::ASYNC ||
(next == Token::AWAIT && !parsing_module_ && !is_async_function())) {
IdentifierT name = impl()->GetSymbol();
+
+ if (impl()->IsArguments(name) && scope()->ShouldBanArguments()) {
+ ReportMessage(MessageTemplate::kArgumentsDisallowedInInitializer);
+ *ok = false;
+ return impl()->NullIdentifier();
+ }
+
// When this function is used to read a formal parameter, we don't always
// know whether the function is going to be strict or sloppy. Indeed for
// arrow functions we don't always know that the identifier we are reading
@@ -1942,7 +1963,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseExpression(
bool accept_IN, bool* ok) {
ExpressionClassifier classifier(this);
ExpressionT result = ParseExpressionCoverGrammar(accept_IN, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
return result;
}
@@ -2068,22 +2089,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseArrayLiteral(
}
Expect(Token::RBRACK, CHECK_OK);
- ExpressionT result =
- factory()->NewArrayLiteral(values, first_spread_index, pos);
- if (first_spread_index >= 0) {
- auto rewritable = factory()->NewRewritableExpression(result, scope());
- impl()->QueueNonPatternForRewriting(rewritable, ok);
- if (!*ok) {
- // If the non-pattern rewriting mechanism is used in the future for
- // rewriting other things than spreads, this error message will have
- // to change. Also, this error message will never appear while pre-
- // parsing (this is OK, as it is an implementation limitation).
- ReportMessage(MessageTemplate::kTooManySpreads);
- return impl()->NullExpression();
- }
- result = rewritable;
- }
- return result;
+ return factory()->NewArrayLiteral(values, first_spread_index, pos);
}
template <class Impl>
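ParseArrayLiteral now returns the literal directly instead of queueing it for the non-pattern rewriting pass, so the RewritableExpression wrapper and the kTooManySpreads limit disappear (the matching deletions of RewriteSpreads and QueueNonPatternForRewriting are in parser.cc further down). Illustrative before/after, JavaScript input in comments; that a later phase consumes first_spread_index is an inference, not something this hunk states:

    // Input:   const a = [1, ...xs, 2];
    // Before:  the literal was wrapped in a RewritableExpression and later
    //          desugared into a do-expression (see the deleted RewriteSpreads).
    // After:   a plain ArrayLiteral is produced, with first_spread_index == 1
    //          recording where ...xs sits; no parse-time rewriting is queued.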
@@ -2108,6 +2114,9 @@ bool ParserBase<Impl>::SetPropertyKindFromToken(Token::Value token,
case Token::SEMICOLON:
*kind = PropertyKind::kClassField;
return true;
+ case Token::PRIVATE_NAME:
+ *kind = PropertyKind::kClassField;
+ return true;
default:
break;
}
@@ -2137,8 +2146,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
!scanner()->HasAnyLineTerminatorAfterNext()) {
Consume(Token::ASYNC);
token = peek();
- if (token == Token::MUL && allow_harmony_async_iteration() &&
- !scanner()->HasAnyLineTerminatorBeforeNext()) {
+ if (token == Token::MUL && !scanner()->HasAnyLineTerminatorBeforeNext()) {
Consume(Token::MUL);
token = peek();
*is_generator = true;
@@ -2198,7 +2206,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
Consume(Token::LBRACK);
ExpressionClassifier computed_name_classifier(this);
expression = ParseAssignmentExpression(true, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
AccumulateFormalParameterContainmentErrors();
Expect(Token::RBRACK, CHECK_OK);
break;
@@ -2270,6 +2278,8 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
PropertyKind kind = PropertyKind::kNotSet;
Token::Value name_token = peek();
+ DCHECK_IMPLIES(name_token == Token::PRIVATE_NAME,
+ allow_harmony_private_fields());
int name_token_position = scanner()->peek_location().beg_pos;
IdentifierT name = impl()->NullIdentifier();
@@ -2285,12 +2295,22 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
peek() == Token::RBRACE) {
name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'static'
name_expression = factory()->NewStringLiteral(name, position());
+ } else if (peek() == Token::PRIVATE_NAME) {
+ DCHECK(allow_harmony_private_fields());
+ // TODO(gsathya): Make a better error message for this.
+ ReportUnexpectedToken(Next());
+ *ok = false;
+ return impl()->NullLiteralProperty();
} else {
*is_static = true;
name_expression = ParsePropertyName(&name, &kind, &is_generator, &is_get,
&is_set, &is_async, is_computed_name,
CHECK_OK_CUSTOM(NullLiteralProperty));
}
+ } else if (name_token == Token::PRIVATE_NAME) {
+ Consume(Token::PRIVATE_NAME);
+ name = impl()->GetSymbol();
+ name_expression = factory()->NewStringLiteral(name, position());
} else {
name_expression = ParsePropertyName(&name, &kind, &is_generator, &is_get,
&is_set, &is_async, is_computed_name,
@@ -2312,9 +2332,14 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
// as an uninitialized field.
case PropertyKind::kShorthandProperty:
case PropertyKind::kValueProperty:
- if (allow_harmony_public_fields()) {
+ if (allow_harmony_public_fields() || allow_harmony_private_fields()) {
*property_kind = ClassLiteralProperty::FIELD;
- if (!*is_computed_name) {
+ if (*is_static && !allow_harmony_static_fields()) {
+ ReportUnexpectedToken(Next());
+ *ok = false;
+ return impl()->NullLiteralProperty();
+ }
+ if (!*is_computed_name && name_token != Token::PRIVATE_NAME) {
checker->CheckClassFieldName(*is_static,
CHECK_OK_CUSTOM(NullLiteralProperty));
}
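Net effect of the checks above: a field-shaped class member is accepted only when at least one of the field flags is on, a static field additionally requires allow_harmony_static_fields, and CheckClassFieldName is skipped for private names, which arrive as their own token. Illustrative inputs (JavaScript in comments, pairings inferred from this hunk):

    // class C { x = 1; }         // FIELD, when either field flag is on
    // class C { #x = 1; }        // FIELD via Token::PRIVATE_NAME (private-fields flag)
    // class C { static x = 1; }  // additionally needs allow_harmony_static_fields(),
    //                            // otherwise the ReportUnexpectedToken() above fires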
@@ -2362,7 +2387,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
name, scanner()->location(), kSkipFunctionNameCheck, kind,
FLAG_harmony_function_tostring ? name_token_position
: kNoSourcePosition,
- FunctionLiteral::kAccessorOrMethod, language_mode(),
+ FunctionLiteral::kAccessorOrMethod, language_mode(), nullptr,
CHECK_OK_CUSTOM(NullLiteralProperty));
*property_kind = ClassLiteralProperty::METHOD;
@@ -2394,7 +2419,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
name, scanner()->location(), kSkipFunctionNameCheck, kind,
FLAG_harmony_function_tostring ? name_token_position
: kNoSourcePosition,
- FunctionLiteral::kAccessorOrMethod, language_mode(),
+ FunctionLiteral::kAccessorOrMethod, language_mode(), nullptr,
CHECK_OK_CUSTOM(NullLiteralProperty));
*property_kind =
@@ -2427,7 +2452,8 @@ ParserBase<Impl>::ParseClassFieldInitializer(ClassInfo* class_info,
: class_info->instance_fields_scope;
if (initializer_scope == nullptr) {
- initializer_scope = NewFunctionScope(FunctionKind::kConciseMethod);
+ initializer_scope =
+ NewFunctionScope(FunctionKind::kClassFieldsInitializerFunction);
// TODO(gsathya): Make scopes be non contiguous.
initializer_scope->set_start_position(scanner()->location().end_pos);
initializer_scope->SetLanguageMode(LanguageMode::kStrict);
@@ -2441,7 +2467,7 @@ ParserBase<Impl>::ParseClassFieldInitializer(ClassInfo* class_info,
initializer =
ParseAssignmentExpression(true, CHECK_OK_CUSTOM(NullExpression));
- impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullExpression));
+ ValidateExpression(CHECK_OK_CUSTOM(NullExpression));
} else {
initializer = factory()->NewUndefinedLiteral(kNoSourcePosition);
}
@@ -2560,7 +2586,7 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
ExpressionClassifier rhs_classifier(this);
ExpressionT rhs = ParseAssignmentExpression(
true, CHECK_OK_CUSTOM(NullLiteralProperty));
- impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullLiteralProperty));
+ ValidateExpression(CHECK_OK_CUSTOM(NullLiteralProperty));
AccumulateFormalParameterContainmentErrors();
value = factory()->NewAssignment(Token::ASSIGN, lhs, rhs,
kNoSourcePosition);
@@ -2595,7 +2621,7 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
ExpressionT value = impl()->ParseFunctionLiteral(
name, scanner()->location(), kSkipFunctionNameCheck, kind,
FLAG_harmony_function_tostring ? next_beg_pos : kNoSourcePosition,
- FunctionLiteral::kAccessorOrMethod, language_mode(),
+ FunctionLiteral::kAccessorOrMethod, language_mode(), nullptr,
CHECK_OK_CUSTOM(NullLiteralProperty));
ObjectLiteralPropertyT result = factory()->NewObjectLiteralProperty(
@@ -2627,7 +2653,7 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
FunctionLiteralT value = impl()->ParseFunctionLiteral(
name, scanner()->location(), kSkipFunctionNameCheck, kind,
FLAG_harmony_function_tostring ? next_beg_pos : kNoSourcePosition,
- FunctionLiteral::kAccessorOrMethod, language_mode(),
+ FunctionLiteral::kAccessorOrMethod, language_mode(), nullptr,
CHECK_OK_CUSTOM(NullLiteralProperty));
ObjectLiteralPropertyT result = factory()->NewObjectLiteralProperty(
@@ -2711,8 +2737,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral(
MessageTemplate::kTooManyArguments);
}
- return factory()->NewObjectLiteral(
- properties, number_of_boilerplate_properties, pos, has_rest_property);
+ return impl()->InitializeObjectLiteral(factory()->NewObjectLiteral(
+ properties, number_of_boilerplate_properties, pos, has_rest_property));
}
template <typename Impl>
@@ -2738,7 +2764,7 @@ typename ParserBase<Impl>::ExpressionListT ParserBase<Impl>::ParseArguments(
*is_simple_parameter_list = false;
}
if (!maybe_arrow) {
- impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullExpressionList));
+ ValidateExpression(CHECK_OK_CUSTOM(NullExpressionList));
}
if (is_spread) {
if (is_simple_parameter_list != nullptr) {
@@ -2784,7 +2810,7 @@ typename ParserBase<Impl>::ExpressionListT ParserBase<Impl>::ParseArguments(
if (!maybe_arrow || peek() != Token::ARROW) {
if (maybe_arrow) {
- impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullExpressionList));
+ ValidateExpression(CHECK_OK_CUSTOM(NullExpressionList));
}
}
@@ -2864,7 +2890,6 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
// Because the arrow's parameters were parsed in the outer scope,
// we need to fix up the scope chain appropriately.
scope_snapshot.Reparent(scope);
- function_state_->SetDestructuringAssignmentsScope(rewritable_length, scope);
FormalParametersT parameters(scope);
if (!classifier()->is_simple_parameter_list()) {
@@ -2914,15 +2939,8 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
productions &= ~ExpressionClassifier::ExpressionProduction;
}
- if (!Token::IsAssignmentOp(peek())) {
- // Parsed conditional expression only (no assignment).
- // Pending non-pattern expressions must be merged.
- Accumulate(productions);
- return expression;
- } else {
- // Pending non-pattern expressions must be discarded.
- Accumulate(productions, false);
- }
+ Accumulate(productions);
+ if (!Token::IsAssignmentOp(peek())) return expression;
if (is_destructuring_assignment) {
ValidateAssignmentPattern(CHECK_OK);
@@ -2945,7 +2963,7 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
ExpressionClassifier rhs_classifier(this);
ExpressionT right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
AccumulateFormalParameterContainmentErrors();
// We try to estimate the set of properties set by constructors. We define a
@@ -3019,7 +3037,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseYieldExpression(
// Delegating yields require an RHS; fall through.
default:
expression = ParseAssignmentExpression(accept_IN, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
break;
}
}
@@ -3052,7 +3070,7 @@ ParserBase<Impl>::ParseConditionalExpression(bool accept_IN,
// We start using the binary expression parser for prec >= 4 only!
ExpressionT expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
if (peek() != Token::CONDITIONAL) return expression;
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
@@ -3067,7 +3085,7 @@ ParserBase<Impl>::ParseConditionalExpression(bool accept_IN,
left = ParseAssignmentExpression(true, CHECK_OK);
AccumulateNonBindingPatternErrors();
}
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
ExpressionT right;
{
SourceRangeScope range_scope(scanner(), &else_range);
@@ -3076,7 +3094,7 @@ ParserBase<Impl>::ParseConditionalExpression(bool accept_IN,
right = ParseAssignmentExpression(accept_IN, CHECK_OK);
AccumulateNonBindingPatternErrors();
}
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
ExpressionT expr = factory()->NewConditional(expression, left, right, pos);
impl()->RecordConditionalSourceRange(expr, then_range, else_range);
return expr;
@@ -3093,7 +3111,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
// prec1 >= 4
while (Precedence(peek(), accept_IN) == prec1) {
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
@@ -3105,7 +3123,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
const int next_prec = is_right_associative ? prec1 : prec1 + 1;
ExpressionT y = ParseBinaryExpression(next_prec, accept_IN, CHECK_OK);
right_range_scope.Finalize();
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
if (impl()->ShortcutNumericLiteralBinaryExpression(&x, y, op, pos)) {
continue;
@@ -3171,7 +3189,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
}
ExpressionT expression = ParseUnaryExpression(CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
if (op == Token::DELETE && is_strict(language_mode())) {
if (impl()->IsIdentifier(expression)) {
@@ -3200,7 +3218,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
expression, beg_pos, scanner()->location().end_pos,
MessageTemplate::kInvalidLhsInPrefixOp, CHECK_OK);
impl()->MarkExpressionAsAssigned(expression);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
return factory()->NewCountOperation(op,
true /* prefix */,
@@ -3211,12 +3229,15 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
classifier()->RecordFormalParameterInitializerError(
scanner()->peek_location(),
MessageTemplate::kAwaitExpressionFormalParameter);
-
int await_pos = peek_position();
Consume(Token::AWAIT);
ExpressionT value = ParseUnaryExpression(CHECK_OK);
+ classifier()->RecordBindingPatternError(
+ Scanner::Location(await_pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidDestructuringTarget);
+
ExpressionT expr = factory()->NewAwait(value, await_pos);
impl()->RecordSuspendSourceRange(expr, PositionAfterSemicolon());
return expr;
@@ -3242,7 +3263,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePostfixExpression(
expression, lhs_beg_pos, scanner()->location().end_pos,
MessageTemplate::kInvalidLhsInPostfixOp, CHECK_OK);
impl()->MarkExpressionAsAssigned(expression);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
Token::Value next = Next();
expression =
@@ -3267,13 +3288,13 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
while (true) {
switch (peek()) {
case Token::LBRACK: {
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
Consume(Token::LBRACK);
int pos = position();
ExpressionT index = ParseExpressionCoverGrammar(true, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
result = factory()->NewProperty(result, index, pos);
Expect(Token::RBRACK, CHECK_OK);
break;
@@ -3281,7 +3302,7 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
case Token::LPAREN: {
int pos;
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
BindingPatternUnexpectedToken();
if (scanner()->current_token() == Token::IDENTIFIER ||
scanner()->current_token() == Token::SUPER ||
@@ -3373,7 +3394,7 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
}
case Token::PERIOD: {
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
Consume(Token::PERIOD);
@@ -3387,7 +3408,7 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL: {
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
result = ParseTemplateLiteral(result, position(), true, CHECK_OK);
@@ -3446,7 +3467,7 @@ ParserBase<Impl>::ParseMemberWithNewPrefixesExpression(bool* is_async,
} else {
result = ParseMemberWithNewPrefixesExpression(is_async, CHECK_OK);
}
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
if (peek() == Token::LPAREN) {
// NewExpression with arguments.
Scanner::Location spread_pos;
@@ -3537,7 +3558,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
is_strict_reserved_name ? kFunctionNameIsStrictReserved
: kFunctionNameValidityUnknown,
function_kind, function_token_position, function_type, language_mode(),
- CHECK_OK);
+ nullptr, CHECK_OK);
} else if (peek() == Token::SUPER) {
const bool is_new = false;
result = ParseSuperExpression(is_new, CHECK_OK);
@@ -3657,14 +3678,14 @@ ParserBase<Impl>::ParseMemberExpressionContinuation(ExpressionT expression,
switch (peek()) {
case Token::LBRACK: {
*is_async = false;
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
Consume(Token::LBRACK);
int pos = position();
ExpressionT index = ParseExpressionCoverGrammar(true, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
expression = factory()->NewProperty(expression, index, pos);
impl()->PushPropertyName(index);
Expect(Token::RBRACK, CHECK_OK);
@@ -3672,13 +3693,19 @@ ParserBase<Impl>::ParseMemberExpressionContinuation(ExpressionT expression,
}
case Token::PERIOD: {
*is_async = false;
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
Consume(Token::PERIOD);
int pos = peek_position();
- IdentifierT name = ParseIdentifierName(CHECK_OK);
+ IdentifierT name;
+ if (allow_harmony_private_fields() && peek() == Token::PRIVATE_NAME) {
+ Consume(Token::PRIVATE_NAME);
+ name = impl()->GetSymbol();
+ } else {
+ name = ParseIdentifierName(CHECK_OK);
+ }
expression = factory()->NewProperty(
expression, factory()->NewStringLiteral(name, pos), pos);
impl()->PushLiteralName(name);
@@ -3687,7 +3714,7 @@ ParserBase<Impl>::ParseMemberExpressionContinuation(ExpressionT expression,
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL: {
*is_async = false;
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
int pos;
@@ -3743,7 +3770,7 @@ void ParserBase<Impl>::ParseFormalParameter(FormalParametersT* parameters,
}
ExpressionClassifier init_classifier(this);
initializer = ParseAssignmentExpression(true, CHECK_OK_CUSTOM(Void));
- impl()->RewriteNonPattern(CHECK_OK_CUSTOM(Void));
+ ValidateExpression(CHECK_OK_CUSTOM(Void));
ValidateFormalParameterInitializer(CHECK_OK_CUSTOM(Void));
parameters->is_simple = false;
DiscardExpressionClassifier();
@@ -3882,7 +3909,7 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
ExpressionClassifier classifier(this);
value = ParseAssignmentExpression(var_context != kForStatement,
CHECK_OK_CUSTOM(NullStatement));
- impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullStatement));
+ ValidateExpression(CHECK_OK_CUSTOM(NullStatement));
variable_loc.end_pos = scanner()->location().end_pos;
if (!parsing_result->first_initializer_loc.IsValid()) {
@@ -3997,7 +4024,7 @@ ParserBase<Impl>::ParseHoistableDeclaration(
const bool is_async = flags & ParseFunctionFlags::kIsAsync;
DCHECK(!is_generator || !is_async);
- if (allow_harmony_async_iteration() && is_async && Check(Token::MUL)) {
+ if (is_async && Check(Token::MUL)) {
// Async generator
is_generator = true;
}
@@ -4025,7 +4052,7 @@ ParserBase<Impl>::ParseHoistableDeclaration(
FunctionLiteralT function = impl()->ParseFunctionLiteral(
name, scanner()->location(), name_validity, kind, pos,
- FunctionLiteral::kDeclaration, language_mode(),
+ FunctionLiteral::kDeclaration, language_mode(), nullptr,
CHECK_OK_CUSTOM(NullStatement));
// In ES6, a function behaves as a lexical binding, except in
@@ -4096,6 +4123,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseClassDeclaration(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseNativeDeclaration(
bool* ok) {
+ function_state_->DisableOptimization(BailoutReason::kNativeFunctionLiteral);
+
int pos = peek_position();
Expect(Token::FUNCTION, CHECK_OK_CUSTOM(NullStatement));
// Allow "eval" or "arguments" for backward compatibility.
@@ -4150,6 +4179,11 @@ void ParserBase<Impl>::ParseFunctionBody(
body = inner_block->statements();
}
+ // If we are parsing the source as if it is wrapped in a function, the source
+ // ends without a closing brace.
+ Token::Value closing_token =
+ function_type == FunctionLiteral::kWrapped ? Token::EOS : Token::RBRACE;
+
{
BlockState block_state(&scope_, inner_scope);
@@ -4162,7 +4196,7 @@ void ParserBase<Impl>::ParseFunctionBody(
} else if (IsAsyncFunction(kind)) {
ParseAsyncFunctionBody(inner_scope, body, CHECK_OK_VOID);
} else {
- ParseStatementList(body, Token::RBRACE, CHECK_OK_VOID);
+ ParseStatementList(body, closing_token, CHECK_OK_VOID);
}
if (IsDerivedConstructor(kind)) {
@@ -4172,7 +4206,7 @@ void ParserBase<Impl>::ParseFunctionBody(
}
}
- Expect(Token::RBRACE, CHECK_OK_VOID);
+ Expect(closing_token, CHECK_OK_VOID);
scope()->set_end_position(scanner()->location().end_pos);
if (!parameters.is_simple) {
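The closing_token distinction is what lets a kWrapped function be parsed from a source string that is only the body: there is no literal '}' in the input, so the statement list runs to end-of-input and EOS is expected instead. A rough sketch of the two cases (the source strings are illustrative):

    // function_type == FunctionLiteral::kWrapped
    //   source: "return a + b;"                 // bare body, no braces present
    //   ParseStatementList(body, Token::EOS, ...);    Expect(Token::EOS, ...);
    // any other function_type
    //   source: "{ return a + b; }"
    //   ParseStatementList(body, Token::RBRACE, ...); Expect(Token::RBRACE, ...);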
@@ -4298,11 +4332,11 @@ typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::ParseArrowFunctionLiteral(
bool accept_IN, const FormalParametersT& formal_parameters,
int rewritable_length, bool* ok) {
- const RuntimeCallStats::CounterId counters[2][2] = {
- {&RuntimeCallStats::ParseBackgroundArrowFunctionLiteral,
- &RuntimeCallStats::ParseArrowFunctionLiteral},
- {&RuntimeCallStats::PreParseBackgroundArrowFunctionLiteral,
- &RuntimeCallStats::PreParseArrowFunctionLiteral}};
+ const RuntimeCallCounterId counters[2][2] = {
+ {RuntimeCallCounterId::kParseBackgroundArrowFunctionLiteral,
+ RuntimeCallCounterId::kParseArrowFunctionLiteral},
+ {RuntimeCallCounterId::kPreParseBackgroundArrowFunctionLiteral,
+ RuntimeCallCounterId::kPreParseArrowFunctionLiteral}};
RuntimeCallTimerScope runtime_timer(
runtime_call_stats_,
counters[Impl::IsPreParser()][parsing_on_main_thread_]);
@@ -4337,6 +4371,11 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
FunctionState function_state(&function_state_, &scope_,
formal_parameters.scope);
+ // Move any queued destructuring assignments which appeared
+ // in this function's parameter list into its own function_state.
+ function_state.AdoptDestructuringAssignmentsFromParentState(
+ rewritable_length);
+
Expect(Token::ARROW, CHECK_OK);
if (peek() == Token::LBRACE) {
@@ -4360,14 +4399,10 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
USE(result);
formal_parameters.scope->ResetAfterPreparsing(ast_value_factory_,
false);
-
// Discard any queued destructuring assignments which appeared
- // in this function's parameter list.
- FunctionState* parent_state = function_state.outer();
- DCHECK_NOT_NULL(parent_state);
- DCHECK_GE(parent_state->destructuring_assignments_to_rewrite().length(),
- rewritable_length);
- parent_state->RewindDestructuringAssignments(rewritable_length);
+ // in this function's parameter list, and which were adopted
+ // into this function state, above.
+ function_state.RewindDestructuringAssignments(0);
} else {
Consume(Token::LBRACE);
body = impl()->NewStatementList(8);
@@ -4467,9 +4502,10 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
scope()->set_start_position(scanner()->location().end_pos);
if (Check(Token::EXTENDS)) {
+ FuncNameInferrer::State fni_state(fni_);
ExpressionClassifier extends_classifier(this);
class_info.extends = ParseLeftHandSideExpression(CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
AccumulateFormalParameterContainmentErrors();
}
@@ -4501,7 +4537,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
class_info.computed_field_count++;
}
is_constructor &= class_info.has_seen_constructor;
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
AccumulateFormalParameterContainmentErrors();
impl()->DeclareClassProperty(name, property, property_kind, is_static,
@@ -4526,7 +4562,7 @@ void ParserBase<Impl>::ParseSingleExpressionFunctionBody(StatementListT body,
ExpressionClassifier classifier(this);
ExpressionT expression = ParseAssignmentExpression(accept_IN, CHECK_OK_VOID);
- impl()->RewriteNonPattern(CHECK_OK_VOID);
+ ValidateExpression(CHECK_OK_VOID);
if (is_async) {
BlockT block = factory()->NewBlock(1, true);
@@ -4564,7 +4600,7 @@ ParserBase<Impl>::ParseAsyncFunctionLiteral(bool* ok) {
IdentifierT name = impl()->NullIdentifier();
FunctionLiteral::FunctionType type = FunctionLiteral::kAnonymousExpression;
- bool is_generator = allow_harmony_async_iteration() && Check(Token::MUL);
+ bool is_generator = Check(Token::MUL);
const bool kIsAsync = true;
const FunctionKind kind = FunctionKindFor(is_generator, kIsAsync);
@@ -4590,7 +4626,7 @@ ParserBase<Impl>::ParseAsyncFunctionLiteral(bool* ok) {
name, scanner()->location(),
is_strict_reserved ? kFunctionNameIsStrictReserved
: kFunctionNameValidityUnknown,
- kind, pos, type, language_mode(), CHECK_OK);
+ kind, pos, type, language_mode(), nullptr, CHECK_OK);
}
template <typename Impl>
@@ -4650,7 +4686,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
int expr_pos = peek_position();
ExpressionT expression = ParseExpressionCoverGrammar(true, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
impl()->AddTemplateExpression(&ts, expression);
if (peek() != Token::RBRACE) {
@@ -4781,8 +4817,9 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseDoExpression(
template <typename Impl>
typename ParserBase<Impl>::LazyParsingResult
-ParserBase<Impl>::ParseStatementList(StatementListT body, int end_token,
- bool may_abort, bool* ok) {
+ParserBase<Impl>::ParseStatementList(StatementListT body,
+ Token::Value end_token, bool may_abort,
+ bool* ok) {
// StatementList ::
// (StatementListItem)* <end_token>
@@ -4953,8 +4990,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
case Token::WHILE:
return ParseWhileStatement(labels, ok);
case Token::FOR:
- if (V8_UNLIKELY(allow_harmony_async_iteration() && is_async_function() &&
- PeekAhead() == Token::AWAIT)) {
+ if (V8_UNLIKELY(is_async_function() && PeekAhead() == Token::AWAIT)) {
return ParseForAwaitStatement(labels, ok);
}
return ParseForStatement(labels, ok);
@@ -5175,11 +5211,6 @@ ParserBase<Impl>::ParseExpressionOrLabelledStatement(
// Parsed expression statement, followed by semicolon.
ExpectSemicolon(CHECK_OK);
- if (labels != nullptr) {
- // TODO(adamk): Also measure in the PreParser by passing something
- // non-null as |labels|.
- impl()->CountUsage(v8::Isolate::kLabeledExpressionStatement);
- }
return factory()->NewExpressionStatement(expr, pos);
}
@@ -5204,8 +5235,9 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseIfStatement(
StatementT else_statement = impl()->NullStatement();
if (Check(Token::ELSE)) {
- SourceRangeScope range_scope(scanner(), &else_range);
+ else_range = SourceRange::ContinuationOf(then_range);
else_statement = ParseScopedStatement(labels, CHECK_OK);
+ else_range.end = scanner_->location().end_pos;
} else {
else_statement = factory()->NewEmptyStatement(kNoSourcePosition);
}
@@ -5547,50 +5579,60 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
{
SourceRangeScope catch_range_scope(scanner(), &catch_range);
if (Check(Token::CATCH)) {
- Expect(Token::LPAREN, CHECK_OK);
- catch_info.scope = NewScope(CATCH_SCOPE);
- catch_info.scope->set_start_position(scanner()->location().beg_pos);
-
- {
- BlockState catch_block_state(&scope_, catch_info.scope);
+ bool has_binding;
+ if (allow_harmony_optional_catch_binding()) {
+ has_binding = Check(Token::LPAREN);
+ } else {
+ has_binding = true;
+ Expect(Token::LPAREN, CHECK_OK);
+ }
- catch_block = factory()->NewBlock(16, false);
+ if (has_binding) {
+ catch_info.scope = NewScope(CATCH_SCOPE);
+ catch_info.scope->set_start_position(scanner()->location().beg_pos);
- // Create a block scope to hold any lexical declarations created
- // as part of destructuring the catch parameter.
{
- BlockState catch_variable_block_state(zone(), &scope_);
- scope()->set_start_position(scanner()->location().beg_pos);
- typename Types::Target target(this, catch_block);
-
- // This does not simply call ParsePrimaryExpression to avoid
- // ExpressionFromIdentifier from being called in the first
- // branch, which would introduce an unresolved symbol and mess
- // with arrow function names.
- if (peek_any_identifier()) {
- catch_info.name =
- ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK);
- } else {
- ExpressionClassifier pattern_classifier(this);
- catch_info.pattern = ParsePrimaryExpression(CHECK_OK);
- ValidateBindingPattern(CHECK_OK);
- }
+ BlockState catch_block_state(&scope_, catch_info.scope);
+
+ catch_block = factory()->NewBlock(16, false);
+
+ // Create a block scope to hold any lexical declarations created
+ // as part of destructuring the catch parameter.
+ {
+ BlockState catch_variable_block_state(zone(), &scope_);
+ scope()->set_start_position(scanner()->location().beg_pos);
+
+ // This does not simply call ParsePrimaryExpression to avoid
+ // ExpressionFromIdentifier from being called in the first
+ // branch, which would introduce an unresolved symbol and mess
+ // with arrow function names.
+ if (peek_any_identifier()) {
+ catch_info.name =
+ ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK);
+ } else {
+ ExpressionClassifier pattern_classifier(this);
+ catch_info.pattern = ParsePrimaryExpression(CHECK_OK);
+ ValidateBindingPattern(CHECK_OK);
+ }
- Expect(Token::RPAREN, CHECK_OK);
- impl()->RewriteCatchPattern(&catch_info, CHECK_OK);
- if (!impl()->IsNull(catch_info.init_block)) {
- catch_block->statements()->Add(catch_info.init_block, zone());
- }
+ Expect(Token::RPAREN, CHECK_OK);
+ impl()->RewriteCatchPattern(&catch_info, CHECK_OK);
+ if (!impl()->IsNull(catch_info.init_block)) {
+ catch_block->statements()->Add(catch_info.init_block, zone());
+ }
- catch_info.inner_block = ParseBlock(nullptr, CHECK_OK);
- catch_block->statements()->Add(catch_info.inner_block, zone());
- impl()->ValidateCatchBlock(catch_info, CHECK_OK);
- scope()->set_end_position(scanner()->location().end_pos);
- catch_block->set_scope(scope()->FinalizeBlockScope());
+ catch_info.inner_block = ParseBlock(nullptr, CHECK_OK);
+ catch_block->statements()->Add(catch_info.inner_block, zone());
+ impl()->ValidateCatchBlock(catch_info, CHECK_OK);
+ scope()->set_end_position(scanner()->location().end_pos);
+ catch_block->set_scope(scope()->FinalizeBlockScope());
+ }
}
- }
- catch_info.scope->set_end_position(scanner()->location().end_pos);
+ catch_info.scope->set_end_position(scanner()->location().end_pos);
+ } else {
+ catch_block = ParseBlock(nullptr, CHECK_OK);
+ }
}
}
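Despite the re-indentation, the binding path is unchanged; the one new decision is at the top: with allow_harmony_optional_catch_binding, the '(' after catch may be absent, and in that case no CATCH_SCOPE or catch parameter is created and the handler is parsed as a plain block. Illustrative inputs (JavaScript in comments):

    // try { f(); } catch (e) { handle(e); }   // has_binding == true: CATCH_SCOPE,
    //                                         // identifier/pattern, RewriteCatchPattern
    // try { f(); } catch { recover(); }       // has_binding == false (flag required):
    //                                         // catch_block = ParseBlock(nullptr, ...)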
@@ -5687,7 +5729,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
if (is_destructuring) {
ValidateAssignmentPattern(CHECK_OK);
} else {
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
}
if (is_for_each) {
@@ -5750,7 +5792,7 @@ ParserBase<Impl>::ParseForEachStatementWithDeclarations(
if (for_info->mode == ForEachStatement::ITERATE) {
ExpressionClassifier classifier(this);
enumerable = ParseAssignmentExpression(true, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
} else {
enumerable = ParseExpression(true, CHECK_OK);
}
@@ -5826,7 +5868,7 @@ ParserBase<Impl>::ParseForEachStatementWithoutDeclarations(
if (for_info->mode == ForEachStatement::ITERATE) {
ExpressionClassifier classifier(this);
enumerable = ParseAssignmentExpression(true, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
} else {
enumerable = ParseExpression(true, CHECK_OK);
}
@@ -5945,7 +5987,6 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
ZoneList<const AstRawString*>* labels, bool* ok) {
// for await '(' ForDeclaration of AssignmentExpression ')'
DCHECK(is_async_function());
- DCHECK(allow_harmony_async_iteration());
int stmt_pos = peek_position();
@@ -6014,7 +6055,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
if (lhs->IsArrayLiteral() || lhs->IsObjectLiteral()) {
ValidateAssignmentPattern(CHECK_OK);
} else {
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
each_variable = CheckAndRewriteReferenceExpression(
lhs, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
kSyntaxError, CHECK_OK);
@@ -6030,7 +6071,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
{
ExpressionClassifier classifier(this);
iterable = ParseAssignmentExpression(kAllowIn, CHECK_OK);
- impl()->RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
}
Expect(Token::RPAREN, CHECK_OK);
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 4d291a741e..0497958c82 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -276,18 +276,18 @@ bool Parser::ShortcutNumericLiteralBinaryExpression(Expression** x,
return true;
}
case Token::SHL: {
- int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
+ int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1F);
*x = factory()->NewNumberLiteral(value, pos);
return true;
}
case Token::SHR: {
- uint32_t shift = DoubleToInt32(y_val) & 0x1f;
+ uint32_t shift = DoubleToInt32(y_val) & 0x1F;
uint32_t value = DoubleToUint32(x_val) >> shift;
*x = factory()->NewNumberLiteral(value, pos);
return true;
}
case Token::SAR: {
- uint32_t shift = DoubleToInt32(y_val) & 0x1f;
+ uint32_t shift = DoubleToInt32(y_val) & 0x1F;
int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
*x = factory()->NewNumberLiteral(value, pos);
return true;
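Only the spelling of the mask changes here (0x1f becomes 0x1F); the value is identical. The mask itself encodes the ECMAScript rule that shift counts are taken modulo 32, which is what keeps this constant folding in line with runtime behaviour. A small self-contained illustration in plain C++ (not V8 helpers):

    #include <cassert>
    #include <cstdint>

    // Shift-count masking as in the folds above: only the low five bits of the
    // right operand matter, matching JavaScript's <<, >> and >>> semantics.
    int32_t FoldShl(int32_t x, int32_t count) { return x << (count & 0x1F); }
    uint32_t FoldShr(uint32_t x, int32_t count) { return x >> (count & 0x1F); }

    int main() {
      assert(FoldShl(1, 33) == 2);                      // JS: (1 << 33) === 2
      assert(FoldShr(0x80000000u, 32) == 0x80000000u);  // JS: (-2147483648 >>> 32)
      return 0;
    }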
@@ -506,7 +506,7 @@ Parser::Parser(ParseInfo* info)
info->runtime_call_stats(), info->logger(),
info->script().is_null() ? -1 : info->script()->id(),
info->is_module(), true),
- scanner_(info->unicode_cache(), use_counts_),
+ scanner_(info->unicode_cache()),
reusable_preparser_(nullptr),
mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
source_range_map_(info->source_range_map()),
@@ -543,10 +543,12 @@ Parser::Parser(ParseInfo* info)
set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
set_allow_harmony_function_sent(FLAG_harmony_function_sent);
set_allow_harmony_public_fields(FLAG_harmony_public_fields);
+ set_allow_harmony_static_fields(FLAG_harmony_static_fields);
set_allow_harmony_dynamic_import(FLAG_harmony_dynamic_import);
set_allow_harmony_import_meta(FLAG_harmony_import_meta);
- set_allow_harmony_async_iteration(FLAG_harmony_async_iteration);
set_allow_harmony_bigint(FLAG_harmony_bigint);
+ set_allow_harmony_optional_catch_binding(FLAG_harmony_optional_catch_binding);
+ set_allow_harmony_private_fields(FLAG_harmony_private_fields);
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
use_counts_[feature] = 0;
@@ -592,8 +594,9 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
// called in the main thread.
DCHECK(parsing_on_main_thread_);
RuntimeCallTimerScope runtime_timer(
- runtime_call_stats_, info->is_eval() ? &RuntimeCallStats::ParseEval
- : &RuntimeCallStats::ParseProgram);
+ runtime_call_stats_, info->is_eval()
+ ? RuntimeCallCounterId::kParseEval
+ : RuntimeCallCounterId::kParseProgram);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.ParseProgram");
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
@@ -667,11 +670,9 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
}
DeclarationScope* scope = outer->AsDeclarationScope();
-
scope->set_start_position(0);
FunctionState function_state(&function_state_, &scope_, scope);
-
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
int beg_pos = scanner()->location().beg_pos;
@@ -689,7 +690,6 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
var->AllocateTo(VariableLocation::PARAMETER, 0);
PrepareGeneratorVariables();
- scope->ForceContextAllocation();
Expression* initial_yield =
BuildInitialYield(kNoSourcePosition, kGeneratorFunction);
body->Add(
@@ -699,6 +699,8 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
ParseModuleItemList(body, &ok);
ok = ok && module()->Validate(this->scope()->AsModuleScope(),
pending_error_handler(), zone());
+ } else if (info->is_wrapped_as_function()) {
+ ParseWrapped(info, body, scope, zone(), &ok);
} else {
// Don't count the mode in the use counters--give the program a chance
// to enable script-wide strict mode below.
@@ -751,13 +753,53 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
return result;
}
+ZoneList<const AstRawString*>* Parser::PrepareWrappedArguments(ParseInfo* info,
+ Zone* zone) {
+ DCHECK(parsing_on_main_thread_);
+ Handle<FixedArray> arguments(info->script()->wrapped_arguments());
+ int arguments_length = arguments->length();
+ ZoneList<const AstRawString*>* arguments_for_wrapped_function =
+ new (zone) ZoneList<const AstRawString*>(arguments_length, zone);
+ for (int i = 0; i < arguments_length; i++) {
+ const AstRawString* argument_string = ast_value_factory()->GetString(
+ Handle<String>(String::cast(arguments->get(i))));
+ arguments_for_wrapped_function->Add(argument_string, zone);
+ }
+ return arguments_for_wrapped_function;
+}
+
+void Parser::ParseWrapped(ParseInfo* info, ZoneList<Statement*>* body,
+ DeclarationScope* outer_scope, Zone* zone, bool* ok) {
+ DCHECK(info->is_wrapped_as_function());
+ ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
+
+ // Set function and block state for the outer eval scope.
+ DCHECK(outer_scope->is_eval_scope());
+ FunctionState function_state(&function_state_, &scope_, outer_scope);
+
+ const AstRawString* function_name = nullptr;
+ Scanner::Location location(0, 0);
+
+ ZoneList<const AstRawString*>* arguments_for_wrapped_function =
+ PrepareWrappedArguments(info, zone);
+
+ FunctionLiteral* function_literal = ParseFunctionLiteral(
+ function_name, location, kSkipFunctionNameCheck, kNormalFunction,
+ kNoSourcePosition, FunctionLiteral::kWrapped, LanguageMode::kSloppy,
+ arguments_for_wrapped_function, CHECK_OK_VOID);
+
+ Statement* return_statement = factory()->NewReturnStatement(
+ function_literal, kNoSourcePosition, kNoSourcePosition);
+ body->Add(return_statement, zone);
+}
+
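ParseWrapped turns the entire source into the body of a single synthesized function: the wrapped argument names become its formal parameters (declared in the kWrapped branch of Parser::ParseFunction further down), the literal is forced to compile eagerly, and the surrounding eval-scope program reduces to one return statement. A rough sketch of the resulting shape; the argument names and source are illustrative:

    // wrapped_arguments: ["exports", "require", "module"]
    // source:            "module.exports = require('./x');"
    //
    // parsed roughly as:
    //   return function (exports, require, module) {
    //     module.exports = require('./x');
    //   };
    // i.e. body holds one ReturnStatement whose value is a FunctionLiteral of
    // type FunctionLiteral::kWrapped.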
FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
Handle<SharedFunctionInfo> shared_info) {
// It's OK to use the Isolate & counters here, since this function is only
// called in the main thread.
DCHECK(parsing_on_main_thread_);
RuntimeCallTimerScope runtime_timer(runtime_call_stats_,
- &RuntimeCallStats::ParseFunction);
+ RuntimeCallCounterId::kParseFunction);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.ParseFunction");
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
@@ -794,7 +836,9 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
}
static FunctionLiteral::FunctionType ComputeFunctionType(ParseInfo* info) {
- if (info->is_declaration()) {
+ if (info->is_wrapped_as_function()) {
+ return FunctionLiteral::kWrapped;
+ } else if (info->is_declaration()) {
return FunctionLiteral::kDeclaration;
} else if (info->is_named_expression()) {
return FunctionLiteral::kNamedExpression;
@@ -927,9 +971,13 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
result = DefaultConstructor(raw_name, IsDerivedConstructor(kind),
info->start_position(), info->end_position());
} else {
+ ZoneList<const AstRawString*>* arguments_for_wrapped_function =
+ info->is_wrapped_as_function() ? PrepareWrappedArguments(info, zone())
+ : nullptr;
result = ParseFunctionLiteral(
raw_name, Scanner::Location::invalid(), kSkipFunctionNameCheck, kind,
- kNoSourcePosition, function_type, info->language_mode(), &ok);
+ kNoSourcePosition, function_type, info->language_mode(),
+ arguments_for_wrapped_function, &ok);
}
if (ok) {
@@ -1251,7 +1299,7 @@ Statement* Parser::ParseExportDefault(bool* ok) {
int pos = position();
ExpressionClassifier classifier(this);
Expression* value = ParseAssignmentExpression(true, CHECK_OK);
- RewriteNonPattern(CHECK_OK);
+ ValidateExpression(CHECK_OK);
SetFunctionName(value, ast_value_factory()->default_string());
const AstRawString* local_name =
@@ -1714,7 +1762,6 @@ Statement* Parser::RewriteTryStatement(Block* try_block, Block* catch_block,
if (catch_block != nullptr && finally_block != nullptr) {
// If we have both, create an inner try/catch.
- DCHECK_NOT_NULL(catch_info.scope);
TryCatchStatement* statement;
statement = factory()->NewTryCatchStatement(try_block, catch_info.scope,
catch_block, kNoSourcePosition);
@@ -1727,7 +1774,6 @@ Statement* Parser::RewriteTryStatement(Block* try_block, Block* catch_block,
if (catch_block != nullptr) {
DCHECK_NULL(finally_block);
- DCHECK_NOT_NULL(catch_info.scope);
TryCatchStatement* stmt = factory()->NewTryCatchStatement(
try_block, catch_info.scope, catch_block, pos);
RecordTryCatchStatementSourceRange(stmt, catch_range);
@@ -1844,13 +1890,11 @@ void Parser::DeclareFunctionNameVar(const AstRawString* function_name,
// !%_IsJSReceiver(result = Await(iterator.next())) &&
// %ThrowIteratorResultNotAnObject(result)
// [endif]
-Expression* Parser::BuildIteratorNextResult(Expression* iterator,
+Expression* Parser::BuildIteratorNextResult(VariableProxy* iterator,
+ VariableProxy* next,
Variable* result, IteratorType type,
int pos) {
- Expression* next_literal = factory()->NewStringLiteral(
- ast_value_factory()->next_string(), kNoSourcePosition);
- Expression* next_property =
- factory()->NewProperty(iterator, next_literal, kNoSourcePosition);
+ Expression* next_property = factory()->NewResolvedProperty(iterator, next);
ZoneList<Expression*>* next_arguments =
new (zone()) ZoneList<Expression*>(0, zone());
Expression* next_call =
@@ -2053,6 +2097,7 @@ Statement* Parser::InitializeForOfStatement(
auto avfactory = ast_value_factory();
Variable* iterator = NewTemporary(avfactory->dot_iterator_string());
+ Variable* next = NewTemporary(avfactory->empty_string());
Variable* result = NewTemporary(avfactory->dot_result_string());
Variable* completion = NewTemporary(avfactory->empty_string());
@@ -2065,6 +2110,17 @@ Statement* Parser::InitializeForOfStatement(
iterable->position());
}
+ Expression* assign_next;
+ {
+ assign_next = factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(next),
+ factory()->NewProperty(factory()->NewVariableProxy(iterator),
+ factory()->NewStringLiteral(
+ avfactory->next_string(), kNoSourcePosition),
+ kNoSourcePosition),
+ kNoSourcePosition);
+ }
+
// [if (IteratorType == kNormal)]
// !%_IsJSReceiver(result = iterator.next()) &&
// %ThrowIteratorResultNotAnObject(result)
@@ -2074,9 +2130,10 @@ Statement* Parser::InitializeForOfStatement(
// [endif]
Expression* next_result;
{
- Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
- next_result =
- BuildIteratorNextResult(iterator_proxy, result, type, next_result_pos);
+ VariableProxy* iterator_proxy = factory()->NewVariableProxy(iterator);
+ VariableProxy* next_proxy = factory()->NewVariableProxy(next);
+ next_result = BuildIteratorNextResult(iterator_proxy, next_proxy, result,
+ type, next_result_pos);
}
// result.done
@@ -2146,8 +2203,8 @@ Statement* Parser::InitializeForOfStatement(
body = block;
}
- for_of->Initialize(body, iterator, assign_iterator, next_result, result_done,
- assign_each);
+ for_of->Initialize(body, iterator, assign_iterator, assign_next, next_result,
+ result_done, assign_each);
return finalize ? FinalizeForOfStatement(for_of, completion, type, nopos)
: for_of;
}
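The new assign_next stores iterator.next in a hidden temporary once, before the loop, and BuildIteratorNextResult then calls that cached method through the new ResolvedProperty node on each iteration (the updated comment in parser.h below writes this as next.[[Call]](iterator, « »)). A rough desugaring of what the loop header now evaluates; the names are illustrative:

    // iterator = GetIterator(iterable);   // assign_iterator (unchanged)
    // next     = iterator.next;           // assign_next: looked up once
    // each iteration:
    //   result = next.call(iterator);     // ResolvedProperty(iterator, next)
    //   if (!IsJSReceiver(result)) throw IteratorResultNotAnObject;
    //   ... result.done / result.value used as before ...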
@@ -2510,7 +2567,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
const AstRawString* function_name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_pos, FunctionLiteral::FunctionType function_type,
- LanguageMode language_mode, bool* ok) {
+ LanguageMode language_mode,
+ ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
//
@@ -2520,8 +2578,12 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Setter ::
// '(' PropertySetParameterList ')' '{' FunctionBody '}'
+ bool is_wrapped = function_type == FunctionLiteral::kWrapped;
+ DCHECK_EQ(is_wrapped, arguments_for_wrapped_function != nullptr);
+
int pos = function_token_pos == kNoSourcePosition ? peek_position()
: function_token_pos;
+ DCHECK_NE(kNoSourcePosition, pos);
// Anonymous functions were passed either the empty symbol or a null
// handle as the function name. Remember if we were passed a non-empty
@@ -2535,7 +2597,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
FunctionLiteral::EagerCompileHint eager_compile_hint =
- function_state_->next_function_is_likely_called()
+ function_state_->next_function_is_likely_called() || is_wrapped
? FunctionLiteral::kShouldEagerCompile
: default_eager_compile_hint();
@@ -2587,8 +2649,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
RuntimeCallTimerScope runtime_timer(
runtime_call_stats_,
parsing_on_main_thread_
- ? &RuntimeCallStats::ParseFunctionLiteral
- : &RuntimeCallStats::ParseBackgroundFunctionLiteral);
+ ? RuntimeCallCounterId::kParseFunctionLiteral
+ : RuntimeCallCounterId::kParseBackgroundFunctionLiteral);
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
@@ -2650,7 +2712,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (should_preparse) scope->set_needs_migration();
#endif
- Expect(Token::LPAREN, CHECK_OK);
+ if (!is_wrapped) Expect(Token::LPAREN, CHECK_OK);
scope->set_start_position(scanner()->location().beg_pos);
// Eager or lazy parse? If is_lazy_top_level_function, we'll parse
@@ -2661,6 +2723,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (should_preparse) {
DCHECK(parse_lazily());
DCHECK(is_lazy_top_level_function || is_lazy_inner_function);
+ DCHECK(!is_wrapped);
Scanner::BookmarkScope bookmark(scanner());
bookmark.Set();
LazyParsingResult result = SkipFunction(
@@ -2687,7 +2750,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
body = ParseFunction(function_name, pos, kind, function_type, scope,
&num_parameters, &function_length,
&has_duplicate_parameters, &expected_property_count,
- CHECK_OK);
+ arguments_for_wrapped_function, CHECK_OK);
}
DCHECK_EQ(should_preparse, temp_zoned_);
@@ -2705,18 +2768,20 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
if (V8_UNLIKELY(FLAG_runtime_stats)) {
if (should_preparse) {
- RuntimeCallStats::CounterId counter_id =
+ RuntimeCallCounterId counter_id =
parsing_on_main_thread_
- ? &RuntimeCallStats::PreParseWithVariableResolution
- : &RuntimeCallStats::PreParseBackgroundWithVariableResolution;
+ ? RuntimeCallCounterId::kPreParseWithVariableResolution
+ : RuntimeCallCounterId::
+ kPreParseBackgroundWithVariableResolution;
if (is_top_level) {
- counter_id =
- parsing_on_main_thread_
- ? &RuntimeCallStats::PreParseNoVariableResolution
- : &RuntimeCallStats::PreParseBackgroundNoVariableResolution;
+ counter_id = parsing_on_main_thread_
+ ? RuntimeCallCounterId::kPreParseNoVariableResolution
+ : RuntimeCallCounterId::
+ kPreParseBackgroundNoVariableResolution;
+ }
+ if (runtime_call_stats_) {
+ runtime_call_stats_->CorrectCurrentCounterId(counter_id);
}
- RuntimeCallStats::CorrectCurrentCounterId(runtime_call_stats_,
- counter_id);
}
}
@@ -3118,11 +3183,14 @@ ZoneList<Statement*>* Parser::ParseFunction(
const AstRawString* function_name, int pos, FunctionKind kind,
FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope, int* num_parameters, int* function_length,
- bool* has_duplicate_parameters, int* expected_property_count, bool* ok) {
+ bool* has_duplicate_parameters, int* expected_property_count,
+ ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok) {
ParsingModeScope mode(this, allow_lazy_ ? PARSE_LAZILY : PARSE_EAGERLY);
FunctionState function_state(&function_state_, &scope_, function_scope);
+ bool is_wrapped = function_type == FunctionLiteral::kWrapped;
+
DuplicateFinder duplicate_finder;
ExpressionClassifier formals_classifier(this, &duplicate_finder);
@@ -3136,34 +3204,53 @@ ZoneList<Statement*>* Parser::ParseFunction(
}
ParserFormalParameters formals(function_scope);
- ParseFormalParameterList(&formals, CHECK_OK);
- if (expected_parameters_end_pos != kNoSourcePosition) {
- // Check for '(' or ')' shenanigans in the parameter string for dynamic
- // functions.
- int position = peek_position();
- if (position < expected_parameters_end_pos) {
- ReportMessageAt(Scanner::Location(position, position + 1),
- MessageTemplate::kArgStringTerminatesParametersEarly);
- *ok = false;
- return nullptr;
- } else if (position > expected_parameters_end_pos) {
- ReportMessageAt(Scanner::Location(expected_parameters_end_pos - 2,
- expected_parameters_end_pos),
- MessageTemplate::kUnexpectedEndOfArgString);
- *ok = false;
- return nullptr;
+
+ if (is_wrapped) {
+ // For a function implicitly wrapped in function header and footer, the
+ // function arguments are provided separately to the source, and are
+ // declared directly here.
+ int arguments_length = arguments_for_wrapped_function->length();
+ for (int i = 0; i < arguments_length; i++) {
+ const bool is_rest = false;
+ Expression* argument = ExpressionFromIdentifier(
+ arguments_for_wrapped_function->at(i), kNoSourcePosition);
+ AddFormalParameter(&formals, argument, NullExpression(),
+ kNoSourcePosition, is_rest);
+ }
+ DCHECK_EQ(arguments_length, formals.num_parameters());
+ DeclareFormalParameters(formals.scope, formals.params, formals.is_simple);
+ } else {
+ // For a regular function, the function arguments are parsed from source.
+ DCHECK_NULL(arguments_for_wrapped_function);
+ ParseFormalParameterList(&formals, CHECK_OK);
+ if (expected_parameters_end_pos != kNoSourcePosition) {
+ // Check for '(' or ')' shenanigans in the parameter string for dynamic
+ // functions.
+ int position = peek_position();
+ if (position < expected_parameters_end_pos) {
+ ReportMessageAt(Scanner::Location(position, position + 1),
+ MessageTemplate::kArgStringTerminatesParametersEarly);
+ *ok = false;
+ return nullptr;
+ } else if (position > expected_parameters_end_pos) {
+ ReportMessageAt(Scanner::Location(expected_parameters_end_pos - 2,
+ expected_parameters_end_pos),
+ MessageTemplate::kUnexpectedEndOfArgString);
+ *ok = false;
+ return nullptr;
+ }
}
+ Expect(Token::RPAREN, CHECK_OK);
+ int formals_end_position = scanner()->location().end_pos;
+
+ CheckArityRestrictions(formals.arity, kind, formals.has_rest,
+ function_scope->start_position(),
+ formals_end_position, CHECK_OK);
+ Expect(Token::LBRACE, CHECK_OK);
}
- Expect(Token::RPAREN, CHECK_OK);
- int formals_end_position = scanner()->location().end_pos;
*num_parameters = formals.num_parameters();
*function_length = formals.function_length;
- CheckArityRestrictions(formals.arity, kind, formals.has_rest,
- function_scope->start_position(), formals_end_position,
- CHECK_OK);
- Expect(Token::LBRACE, CHECK_OK);
-
ZoneList<Statement*>* body = new (zone()) ZoneList<Statement*>(8, zone());
ParseFunctionBody(body, function_name, pos, formals, kind, function_type, ok);
@@ -3240,9 +3327,10 @@ void Parser::DeclareClassProperty(const AstRawString* class_name,
return;
}
- DCHECK(allow_harmony_public_fields());
+ DCHECK(allow_harmony_public_fields() || allow_harmony_private_fields());
if (is_static) {
+ DCHECK(allow_harmony_static_fields());
class_info->static_fields->Add(property, zone());
} else {
class_info->instance_fields->Add(property, zone());
@@ -3262,6 +3350,8 @@ void Parser::DeclareClassProperty(const AstRawString* class_name,
FunctionLiteral* Parser::CreateInitializerFunction(
DeclarationScope* scope, ZoneList<ClassLiteral::Property*>* fields) {
+ DCHECK_EQ(scope->function_kind(),
+ FunctionKind::kClassFieldsInitializerFunction);
// function() { .. class fields initializer .. }
ZoneList<Statement*>* statements = NewStatementList(1);
InitializeClassFieldsStatement* static_fields =
@@ -3450,8 +3540,8 @@ void Parser::UpdateStatistics(Isolate* isolate, Handle<Script> script) {
}
void Parser::ParseOnBackground(ParseInfo* info) {
- RuntimeCallTimerScope runtimeTimer(runtime_call_stats_,
- &RuntimeCallStats::ParseBackgroundProgram);
+ RuntimeCallTimerScope runtimeTimer(
+ runtime_call_stats_, RuntimeCallCounterId::kParseBackgroundProgram);
parsing_on_main_thread_ = false;
if (!info->script().is_null()) {
set_script_id(info->script()->id());
@@ -3581,11 +3671,11 @@ namespace {
// http://burtleburtle.net/bob/hash/integer.html
uint32_t HalfAvalance(uint32_t a) {
- a = (a + 0x479ab41d) + (a << 8);
- a = (a ^ 0xe4aa10ce) ^ (a >> 5);
- a = (a + 0x9942f0a6) - (a << 14);
- a = (a ^ 0x5aedd67d) ^ (a >> 3);
- a = (a + 0x17bea992) + (a << 7);
+ a = (a + 0x479AB41D) + (a << 8);
+ a = (a ^ 0xE4AA10CE) ^ (a >> 5);
+ a = (a + 0x9942F0A6) - (a << 14);
+ a = (a ^ 0x5AEDD67D) ^ (a >> 3);
+ a = (a + 0x17BEA992) + (a << 7);
return a;
}
@@ -3808,24 +3898,6 @@ void Parser::RewriteAsyncFunctionBody(ZoneList<Statement*>* body, Block* block,
body->Add(block, zone());
}
-void Parser::RewriteNonPattern(bool* ok) {
- ValidateExpression(CHECK_OK_VOID);
- auto non_patterns_to_rewrite = function_state_->non_patterns_to_rewrite();
- int begin = classifier()->GetNonPatternBegin();
- int end = non_patterns_to_rewrite->length();
- if (begin < end) {
- for (int i = begin; i < end; i++) {
- RewritableExpression* expr = non_patterns_to_rewrite->at(i);
- // TODO(adamk): Make this more typesafe.
- DCHECK(expr->expression()->IsArrayLiteral());
- ArrayLiteral* lit = expr->expression()->AsArrayLiteral();
- expr->Rewrite(RewriteSpreads(lit));
- }
- non_patterns_to_rewrite->Rewind(begin);
- }
-}
-
-
void Parser::RewriteDestructuringAssignments() {
const auto& assignments =
function_state_->destructuring_assignments_to_rewrite();
@@ -3845,102 +3917,11 @@ void Parser::RewriteDestructuringAssignments() {
}
}
-Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
- // Array literals containing spreads are rewritten using do expressions, e.g.
- // [1, 2, 3, ...x, 4, ...y, 5]
- // is roughly rewritten as:
- // do {
- // $R = [1, 2, 3];
- // for ($i of x) %AppendElement($R, $i);
- // %AppendElement($R, 4);
- // for ($j of y) %AppendElement($R, $j);
- // %AppendElement($R, 5);
- // $R
- // }
- // where $R, $i and $j are fresh temporary variables.
- ZoneList<Expression*>::iterator s = lit->FirstSpread();
- if (s == lit->EndValue()) return nullptr; // no spread, no rewriting...
- Variable* result = NewTemporary(ast_value_factory()->dot_result_string());
- // NOTE: The value assigned to R is the whole original array literal,
- // spreads included. This will be fixed before the rewritten AST is returned.
- // $R = lit
- Expression* init_result = factory()->NewAssignment(
- Token::INIT, factory()->NewVariableProxy(result), lit, kNoSourcePosition);
- Block* do_block = factory()->NewBlock(16, false);
- do_block->statements()->Add(
- factory()->NewExpressionStatement(init_result, kNoSourcePosition),
- zone());
- // Traverse the array literal starting from the first spread.
- while (s != lit->EndValue()) {
- Expression* value = *s++;
- Spread* spread = value->AsSpread();
- if (spread == nullptr) {
- // If the element is not a spread, we're adding a single:
- // %AppendElement($R, value)
- // or, in case of a hole,
- // ++($R.length)
- if (!value->IsTheHoleLiteral()) {
- ZoneList<Expression*>* append_element_args = NewExpressionList(2);
- append_element_args->Add(factory()->NewVariableProxy(result), zone());
- append_element_args->Add(value, zone());
- do_block->statements()->Add(
- factory()->NewExpressionStatement(
- factory()->NewCallRuntime(Runtime::kAppendElement,
- append_element_args,
- kNoSourcePosition),
- kNoSourcePosition),
- zone());
- } else {
- Property* length_property = factory()->NewProperty(
- factory()->NewVariableProxy(result),
- factory()->NewStringLiteral(ast_value_factory()->length_string(),
- kNoSourcePosition),
- kNoSourcePosition);
- CountOperation* count_op = factory()->NewCountOperation(
- Token::INC, true /* prefix */, length_property, kNoSourcePosition);
- do_block->statements()->Add(
- factory()->NewExpressionStatement(count_op, kNoSourcePosition),
- zone());
- }
- } else {
- // If it's a spread, we're adding a for/of loop iterating through it.
- Variable* each = NewTemporary(ast_value_factory()->dot_for_string());
- Expression* subject = spread->expression();
- // %AppendElement($R, each)
- Statement* append_body;
- {
- ZoneList<Expression*>* append_element_args = NewExpressionList(2);
- append_element_args->Add(factory()->NewVariableProxy(result), zone());
- append_element_args->Add(factory()->NewVariableProxy(each), zone());
- append_body = factory()->NewExpressionStatement(
- factory()->NewCallRuntime(Runtime::kAppendElement,
- append_element_args, kNoSourcePosition),
- kNoSourcePosition);
- }
- // for (each of spread) %AppendElement($R, each)
- ForOfStatement* loop =
- factory()->NewForOfStatement(nullptr, kNoSourcePosition);
- const bool finalize = false;
- InitializeForOfStatement(loop, factory()->NewVariableProxy(each), subject,
- append_body, finalize, IteratorType::kNormal);
- do_block->statements()->Add(loop, zone());
- }
- }
- // Now, rewind the original array literal to truncate everything from the
- // first spread (included) until the end. This fixes $R's initialization.
- lit->RewindSpreads();
- return factory()->NewDoExpression(do_block, result, lit->position());
-}
-
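For orientation, the deleted RewriteSpreads helper above desugared spread-bearing array literals exactly as its comment describes. A rough standalone C++ analogue of that behaviour (illustrative only, not V8 code) is:

#include <vector>

// Mirrors the comment's rewrite of [1, 2, 3, ...x, 4, ...y, 5]:
// literal elements are appended directly, spreads are appended by iteration.
std::vector<int> BuildWithSpreads(const std::vector<int>& x,
                                  const std::vector<int>& y) {
  std::vector<int> r = {1, 2, 3};   // $R = [1, 2, 3];
  for (int i : x) r.push_back(i);   // for ($i of x) %AppendElement($R, $i);
  r.push_back(4);                   // %AppendElement($R, 4);
  for (int j : y) r.push_back(j);   // for ($j of y) %AppendElement($R, $j);
  r.push_back(5);                   // %AppendElement($R, 5);
  return r;                         // $R
}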
void Parser::QueueDestructuringAssignmentForRewriting(
RewritableExpression* expr) {
function_state_->AddDestructuringAssignment(expr);
}
-void Parser::QueueNonPatternForRewriting(RewritableExpression* expr, bool* ok) {
- function_state_->AddNonPatternForRewriting(expr, ok);
-}
-
void Parser::SetFunctionNameFromPropertyName(LiteralProperty* property,
const AstRawString* name,
const AstRawString* prefix) {
@@ -4315,9 +4296,8 @@ void Parser::BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
zone());
Block* catch_block = factory()->NewBlock(0, false);
- Scope* catch_scope = NewHiddenCatchScope();
- try_call_return = factory()->NewTryCatchStatement(try_block, catch_scope,
- catch_block, nopos);
+ try_call_return =
+ factory()->NewTryCatchStatement(try_block, nullptr, catch_block, nopos);
}
// let output = %_Call(iteratorReturn, iterator);
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index aa800dafc5..f92eddcd9d 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -267,6 +267,15 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// Called by ParseProgram after setting up the scanner.
FunctionLiteral* DoParseProgram(ParseInfo* info);
+ // Parses the script as if the source were implicitly wrapped in a function.
+ // We manually construct the AST and scopes for a top-level function and the
+ // function wrapper.
+ void ParseWrapped(ParseInfo* info, ZoneList<Statement*>* body,
+ DeclarationScope* scope, Zone* zone, bool* ok);
+
+ ZoneList<const AstRawString*>* PrepareWrappedArguments(ParseInfo* info,
+ Zone* zone);
+
void SetCachedData(ParseInfo* info);
void StitchAst(ParseInfo* top_level_parse_info, Isolate* isolate);
@@ -292,10 +301,12 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
SET_ALLOW(harmony_do_expressions);
SET_ALLOW(harmony_function_sent);
SET_ALLOW(harmony_public_fields);
+ SET_ALLOW(harmony_static_fields);
SET_ALLOW(harmony_dynamic_import);
SET_ALLOW(harmony_import_meta);
- SET_ALLOW(harmony_async_iteration);
SET_ALLOW(harmony_bigint);
+ SET_ALLOW(harmony_optional_catch_binding);
+ SET_ALLOW(harmony_private_fields);
#undef SET_ALLOW
}
return reusable_preparser_;
@@ -391,13 +402,14 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* RewriteDestructuringAssignment(Assignment* assignment);
// [if (IteratorType == kAsync)]
- // !%_IsJSReceiver(result = Await(iterator.next()) &&
+ // !%_IsJSReceiver(result = Await(next.[[Call]](iterator, « »)) &&
// %ThrowIteratorResultNotAnObject(result)
// [else]
- // !%_IsJSReceiver(result = iterator.next()) &&
+ // !%_IsJSReceiver(result = next.[[Call]](iterator, « »)) &&
// %ThrowIteratorResultNotAnObject(result)
// [endif]
- Expression* BuildIteratorNextResult(Expression* iterator, Variable* result,
+ Expression* BuildIteratorNextResult(VariableProxy* iterator,
+ VariableProxy* next, Variable* result,
IteratorType type, int pos);
// Initialize the components of a for-in / for-of statement.
@@ -425,7 +437,13 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
const AstRawString* name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_position, FunctionLiteral::FunctionType type,
- LanguageMode language_mode, bool* ok);
+ LanguageMode language_mode,
+ ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok);
+
+ ObjectLiteral* InitializeObjectLiteral(ObjectLiteral* object_literal) {
+ object_literal->CalculateEmitStore(main_zone());
+ return object_literal;
+ }
// Check if the scope has conflicting var/let declarations from different
// scopes. This covers for example
@@ -488,7 +506,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope, int* num_parameters,
int* function_length, bool* has_duplicate_parameters,
- int* expected_property_count, bool* ok);
+ int* expected_property_count,
+ ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok);
void ThrowPendingError(Isolate* isolate, Handle<Script> script);
@@ -553,13 +572,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* RewriteSpreads(ArrayLiteral* lit);
- // Rewrite expressions that are not used as patterns
- V8_INLINE void RewriteNonPattern(bool* ok);
-
V8_INLINE void QueueDestructuringAssignmentForRewriting(
RewritableExpression* assignment);
- V8_INLINE void QueueNonPatternForRewriting(RewritableExpression* expr,
- bool* ok);
friend class InitializerRewriter;
void RewriteParameterInitializer(Expression* expr);
@@ -760,17 +774,11 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
bool CollapseNaryExpression(Expression** x, Expression* y, Token::Value op,
int pos, const SourceRange& range);
- // Rewrites the following types of unary expressions:
- // not <literal> -> true / false
- // + <numeric literal> -> <numeric literal>
- // - <numeric literal> -> <numeric literal with value negated>
+ // Returns a UnaryExpression or, in one of the following cases, a Literal.
// ! <literal> -> true / false
- // The following rewriting rules enable the collection of type feedback
- // without any special stub and the multiplication is removed later in
- // Crankshaft's canonicalization pass.
- // + foo -> foo * 1
- // - foo -> foo * (-1)
- // ~ foo -> foo ^(~0)
+ // + <Number literal> -> <Number literal>
+ // - <Number literal> -> <Number literal with value negated>
+ // ~ <Number literal> -> <Number literal with ~ applied>
Expression* BuildUnaryExpression(Expression* expression, Token::Value op,
int pos);
@@ -990,10 +998,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return function_state_->GetReportedErrorList();
}
- V8_INLINE ZoneList<RewritableExpression*>* GetNonPatternList() const {
- return function_state_->non_patterns_to_rewrite();
- }
-
V8_INLINE void CountUsage(v8::Isolate::UseCounterFeature feature) {
++use_counts_[feature];
}
diff --git a/deps/v8/src/parsing/pattern-rewriter.cc b/deps/v8/src/parsing/pattern-rewriter.cc
index faecb5bb0c..daa126d443 100644
--- a/deps/v8/src/parsing/pattern-rewriter.cc
+++ b/deps/v8/src/parsing/pattern-rewriter.cc
@@ -445,6 +445,11 @@ void PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
auto iterator = CreateTempVar(factory()->NewGetIterator(
factory()->NewVariableProxy(temp), current_value_, IteratorType::kNormal,
current_value_->position()));
+ auto next = CreateTempVar(factory()->NewProperty(
+ factory()->NewVariableProxy(iterator),
+ factory()->NewStringLiteral(ast_value_factory()->next_string(),
+ kNoSourcePosition),
+ kNoSourcePosition));
auto done =
CreateTempVar(factory()->NewBooleanLiteral(false, kNoSourcePosition));
auto result = CreateTempVar();
@@ -525,7 +530,8 @@ void PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
next_block->statements()->Add(
factory()->NewExpressionStatement(
parser_->BuildIteratorNextResult(
- factory()->NewVariableProxy(iterator), result,
+ factory()->NewVariableProxy(iterator),
+ factory()->NewVariableProxy(next), result,
IteratorType::kNormal, kNoSourcePosition),
kNoSourcePosition),
zone());
@@ -599,6 +605,7 @@ void PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
// result = IteratorNext(iterator);
Statement* get_next = factory()->NewExpressionStatement(
parser_->BuildIteratorNextResult(factory()->NewVariableProxy(iterator),
+ factory()->NewVariableProxy(next),
result, IteratorType::kNormal, nopos),
nopos);
@@ -756,6 +763,7 @@ NOT_A_PATTERN(ImportCallExpression)
NOT_A_PATTERN(Literal)
NOT_A_PATTERN(NativeFunctionLiteral)
NOT_A_PATTERN(RegExpLiteral)
+NOT_A_PATTERN(ResolvedProperty)
NOT_A_PATTERN(ReturnStatement)
NOT_A_PATTERN(SloppyBlockFunctionStatement)
NOT_A_PATTERN(Spread)
diff --git a/deps/v8/src/parsing/preparsed-scope-data.cc b/deps/v8/src/parsing/preparsed-scope-data.cc
index 7191639cf8..786be3f0e5 100644
--- a/deps/v8/src/parsing/preparsed-scope-data.cc
+++ b/deps/v8/src/parsing/preparsed-scope-data.cc
@@ -24,7 +24,7 @@ class VariableMaybeAssignedField : public BitField8<bool, 0, 1> {};
class VariableContextAllocatedField
: public BitField8<bool, VariableMaybeAssignedField::kNext, 1> {};
-const int kMagicValue = 0xc0de0de;
+const int kMagicValue = 0xC0DE0DE;
#ifdef DEBUG
const size_t kUint32Size = 5;
@@ -571,8 +571,8 @@ void ConsumedPreParsedScopeData::RestoreData(Scope* scope) {
if (scope_data_->RemainingBytes() < kUint8Size) {
// Temporary debugging code for detecting inconsistent data. Write debug
// information on the stack, then crash.
- data_->GetIsolate()->PushStackTraceAndDie(0xc0defee, nullptr, nullptr,
- 0xc0defee);
+ data_->GetIsolate()->PushStackTraceAndDie(0xC0DEFEE, nullptr, nullptr,
+ 0xC0DEFEE);
}
// scope_type is stored only in debug mode.
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index 16879e518c..b28eab2e75 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -207,12 +207,12 @@ PreParser::PreParseResult PreParser::PreParseFunction(
if (!IsArrowFunction(kind) && track_unresolved_variables_ &&
result == kLazyParsingComplete) {
- DeclareFunctionNameVar(function_name, function_type, function_scope);
-
// Declare arguments after parsing the function since lexical 'arguments'
// masks the arguments object. Declare arguments before declaring the
// function var since the arguments object masks 'function arguments'.
function_scope->DeclareArguments(ast_value_factory());
+
+ DeclareFunctionNameVar(function_name, function_type, function_scope);
}
use_counts_ = nullptr;
@@ -266,14 +266,18 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
Identifier function_name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_pos, FunctionLiteral::FunctionType function_type,
- LanguageMode language_mode, bool* ok) {
+ LanguageMode language_mode,
+ ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok) {
+ // Wrapped functions are not parsed in the preparser.
+ DCHECK_NULL(arguments_for_wrapped_function);
+ DCHECK_NE(FunctionLiteral::kWrapped, function_type);
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
- const RuntimeCallStats::CounterId counters[2][2] = {
- {&RuntimeCallStats::PreParseBackgroundNoVariableResolution,
- &RuntimeCallStats::PreParseNoVariableResolution},
- {&RuntimeCallStats::PreParseBackgroundWithVariableResolution,
- &RuntimeCallStats::PreParseWithVariableResolution}};
+ const RuntimeCallCounterId counters[2][2] = {
+ {RuntimeCallCounterId::kPreParseBackgroundNoVariableResolution,
+ RuntimeCallCounterId::kPreParseNoVariableResolution},
+ {RuntimeCallCounterId::kPreParseBackgroundWithVariableResolution,
+ RuntimeCallCounterId::kPreParseWithVariableResolution}};
RuntimeCallTimerScope runtime_timer(
runtime_call_stats_,
counters[track_unresolved_variables_][parsing_on_main_thread_]);
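The counters[2][2] table above selects one of four runtime-call counters from two booleans. A minimal sketch of the same dispatch pattern (hypothetical enum names, not the V8 API):

enum class Counter {
  kPreParseBackgroundNoVars,
  kPreParseNoVars,
  kPreParseBackgroundWithVars,
  kPreParseWithVars
};

// Row = whether unresolved variables are tracked, column = main thread or not.
Counter PickCounter(bool track_unresolved_variables, bool on_main_thread) {
  const Counter table[2][2] = {
      {Counter::kPreParseBackgroundNoVars, Counter::kPreParseNoVars},
      {Counter::kPreParseBackgroundWithVars, Counter::kPreParseWithVars}};
  return table[track_unresolved_variables][on_main_thread];
}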
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 8c1d183fd6..705cd011ee 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -956,12 +956,18 @@ class PreParser : public ParserBase<PreParser> {
bool is_inner_function, bool may_abort, bool* ok) {
UNREACHABLE();
}
- Expression ParseFunctionLiteral(Identifier name,
- Scanner::Location function_name_location,
- FunctionNameValidity function_name_validity,
- FunctionKind kind, int function_token_pos,
- FunctionLiteral::FunctionType function_type,
- LanguageMode language_mode, bool* ok);
+
+ Expression ParseFunctionLiteral(
+ Identifier name, Scanner::Location function_name_location,
+ FunctionNameValidity function_name_validity, FunctionKind kind,
+ int function_token_pos, FunctionLiteral::FunctionType function_type,
+ LanguageMode language_mode,
+ ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok);
+
+ PreParserExpression InitializeObjectLiteral(PreParserExpression literal) {
+ return literal;
+ }
+
LazyParsingResult ParseStatementListAndLogFunction(
PreParserFormalParameters* formals, bool maybe_abort, bool* ok);
@@ -999,7 +1005,6 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE void RewriteAsyncFunctionBody(
PreParserStatementList body, PreParserStatement block,
const PreParserExpression& return_value, bool* ok) {}
- V8_INLINE void RewriteNonPattern(bool* ok) { ValidateExpression(ok); }
void DeclareAndInitializeVariables(
PreParserStatement block,
@@ -1186,8 +1191,6 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE void QueueDestructuringAssignmentForRewriting(
PreParserExpression assignment) {}
- V8_INLINE void QueueNonPatternForRewriting(const PreParserExpression& expr,
- bool* ok) {}
// Helper functions for recursive descent.
V8_INLINE bool IsEval(const PreParserIdentifier& identifier) const {
@@ -1665,10 +1668,6 @@ class PreParser : public ParserBase<PreParser> {
return function_state_->GetReportedErrorList();
}
- V8_INLINE ZoneList<PreParserExpression>* GetNonPatternList() const {
- return function_state_->non_patterns_to_rewrite();
- }
-
V8_INLINE void CountUsage(v8::Isolate::UseCounterFeature feature) {
if (use_counts_ != nullptr) ++use_counts_[feature];
}
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index c31d0ea21d..102efad292 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -367,8 +367,8 @@ bool Rewriter::Rewrite(ParseInfo* info) {
RuntimeCallTimerScope runtimeTimer(
info->runtime_call_stats(),
info->on_background_thread()
- ? &RuntimeCallStats::CompileBackgroundRewriteReturnResult
- : &RuntimeCallStats::CompileRewriteReturnResult);
+ ? RuntimeCallCounterId::kCompileBackgroundRewriteReturnResult
+ : RuntimeCallCounterId::kCompileRewriteReturnResult);
FunctionLiteral* function = info->literal();
DCHECK_NOT_NULL(function);
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index c5175c4de7..20aa5c9f8e 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -16,7 +16,7 @@ namespace v8 {
namespace internal {
namespace {
-const unibrow::uchar kUtf8Bom = 0xfeff;
+const unibrow::uchar kUtf8Bom = 0xFEFF;
} // namespace
// ----------------------------------------------------------------------------
@@ -203,7 +203,7 @@ class Utf8ExternalStreamingStream : public BufferedUtf16CharacterStream {
Utf8ExternalStreamingStream(
ScriptCompiler::ExternalSourceStream* source_stream,
RuntimeCallStats* stats)
- : current_({0, {0, 0, unibrow::Utf8::Utf8IncrementalBuffer(0)}}),
+ : current_({0, {0, 0, 0, unibrow::Utf8::State::kAccept}}),
source_stream_(source_stream),
stats_(stats) {}
~Utf8ExternalStreamingStream() override {
@@ -223,7 +223,8 @@ class Utf8ExternalStreamingStream : public BufferedUtf16CharacterStream {
struct StreamPosition {
size_t bytes;
size_t chars;
- unibrow::Utf8::Utf8IncrementalBuffer incomplete_char;
+ uint32_t incomplete_char;
+ unibrow::Utf8::State state;
};
// Position contains a StreamPosition and the index of the chunk the position
@@ -268,25 +269,25 @@ bool Utf8ExternalStreamingStream::SkipToPosition(size_t position) {
const Chunk& chunk = chunks_[current_.chunk_no];
DCHECK(current_.pos.bytes >= chunk.start.bytes);
- unibrow::Utf8::Utf8IncrementalBuffer incomplete_char =
- chunk.start.incomplete_char;
+ unibrow::Utf8::State state = chunk.start.state;
+ uint32_t incomplete_char = chunk.start.incomplete_char;
size_t it = current_.pos.bytes - chunk.start.bytes;
size_t chars = chunk.start.chars;
while (it < chunk.length && chars < position) {
- unibrow::uchar t =
- unibrow::Utf8::ValueOfIncremental(chunk.data[it], &incomplete_char);
+ unibrow::uchar t = unibrow::Utf8::ValueOfIncremental(
+ chunk.data[it], &it, &state, &incomplete_char);
if (t == kUtf8Bom && current_.pos.chars == 0) {
// BOM detected at beginning of the stream. Don't copy it.
} else if (t != unibrow::Utf8::kIncomplete) {
chars++;
if (t > unibrow::Utf16::kMaxNonSurrogateCharCode) chars++;
}
- it++;
}
current_.pos.bytes += it;
current_.pos.chars = chars;
current_.pos.incomplete_char = incomplete_char;
+ current_.pos.state = state;
current_.chunk_no += (it == chunk.length);
return current_.pos.chars == position;
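The stream now records an explicit (state, incomplete_char) pair per position, so decoding can stop at any byte boundary and resume in the next chunk. A simplified, self-contained sketch of that style of incremental decoder (not the unibrow API, and it skips overlong/surrogate validation):

#include <cstdint>

struct Utf8State {
  uint32_t incomplete_char = 0;  // bits of the code point assembled so far
  int remaining = 0;             // continuation bytes still expected
};

// Feeds one byte; returns a code point, 0xFFFD on error, or -1 if incomplete.
int32_t Feed(Utf8State* s, uint8_t byte) {
  if (s->remaining == 0) {
    if (byte < 0x80) return byte;  // ASCII fast path
    if ((byte & 0xE0) == 0xC0) { s->incomplete_char = byte & 0x1F; s->remaining = 1; }
    else if ((byte & 0xF0) == 0xE0) { s->incomplete_char = byte & 0x0F; s->remaining = 2; }
    else if ((byte & 0xF8) == 0xF0) { s->incomplete_char = byte & 0x07; s->remaining = 3; }
    else return 0xFFFD;            // invalid lead byte
    return -1;
  }
  if ((byte & 0xC0) != 0x80) {     // not a continuation byte: bad sequence
    s->remaining = 0;
    return 0xFFFD;
  }
  s->incomplete_char = (s->incomplete_char << 6) | (byte & 0x3F);
  return --s->remaining == 0 ? static_cast<int32_t>(s->incomplete_char) : -1;
}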
@@ -304,31 +305,33 @@ void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
uint16_t* cursor = buffer_ + (buffer_end_ - buffer_start_);
DCHECK_EQ(cursor, buffer_end_);
+ unibrow::Utf8::State state = current_.pos.state;
+ uint32_t incomplete_char = current_.pos.incomplete_char;
+
// If the current chunk is the last (empty) chunk we'll have to process
// any left-over, partial characters.
if (chunk.length == 0) {
- unibrow::uchar t =
- unibrow::Utf8::ValueOfIncrementalFinish(&current_.pos.incomplete_char);
+ unibrow::uchar t = unibrow::Utf8::ValueOfIncrementalFinish(&state);
if (t != unibrow::Utf8::kBufferEmpty) {
- DCHECK_LT(t, unibrow::Utf16::kMaxNonSurrogateCharCode);
+ DCHECK_EQ(t, unibrow::Utf8::kBadChar);
*cursor = static_cast<uc16>(t);
buffer_end_++;
current_.pos.chars++;
+ current_.pos.incomplete_char = 0;
+ current_.pos.state = state;
}
return;
}
- unibrow::Utf8::Utf8IncrementalBuffer incomplete_char =
- current_.pos.incomplete_char;
- size_t it;
- for (it = current_.pos.bytes - chunk.start.bytes;
- it < chunk.length && cursor + 1 < buffer_start_ + kBufferSize; it++) {
- unibrow::uchar t =
- unibrow::Utf8::ValueOfIncremental(chunk.data[it], &incomplete_char);
- if (t == unibrow::Utf8::kIncomplete) continue;
+ size_t it = current_.pos.bytes - chunk.start.bytes;
+ while (it < chunk.length && cursor + 1 < buffer_start_ + kBufferSize) {
+ unibrow::uchar t = unibrow::Utf8::ValueOfIncremental(
+ chunk.data[it], &it, &state, &incomplete_char);
if (V8_LIKELY(t < kUtf8Bom)) {
      *(cursor++) = static_cast<uc16>(t);  // The most frequent case.
- } else if (t == kUtf8Bom && current_.pos.bytes + it == 2) {
+ } else if (t == unibrow::Utf8::kIncomplete) {
+ continue;
+ } else if (t == kUtf8Bom && current_.pos.bytes + it == 3) {
// BOM detected at beginning of the stream. Don't copy it.
} else if (t <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
*(cursor++) = static_cast<uc16>(t);
@@ -341,13 +344,15 @@ void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
current_.pos.bytes = chunk.start.bytes + it;
current_.pos.chars += (cursor - buffer_end_);
current_.pos.incomplete_char = incomplete_char;
+ current_.pos.state = state;
current_.chunk_no += (it == chunk.length);
buffer_end_ = cursor;
}
bool Utf8ExternalStreamingStream::FetchChunk() {
- RuntimeCallTimerScope scope(stats_, &RuntimeCallStats::GetMoreDataCallback);
+ RuntimeCallTimerScope scope(stats_,
+ RuntimeCallCounterId::kGetMoreDataCallback);
DCHECK_EQ(current_.chunk_no, chunks_.size());
DCHECK(chunks_.empty() || chunks_.back().length != 0);
@@ -395,16 +400,15 @@ void Utf8ExternalStreamingStream::SearchPosition(size_t position) {
// checking whether the # bytes in a chunk are equal to the # chars, and if
// so avoid the expensive SkipToPosition.)
bool ascii_only_chunk =
- chunks_[chunk_no].start.incomplete_char ==
- unibrow::Utf8::Utf8IncrementalBuffer(0) &&
+ chunks_[chunk_no].start.incomplete_char == 0 &&
(chunks_[chunk_no + 1].start.bytes - chunks_[chunk_no].start.bytes) ==
(chunks_[chunk_no + 1].start.chars - chunks_[chunk_no].start.chars);
if (ascii_only_chunk) {
size_t skip = position - chunks_[chunk_no].start.chars;
current_ = {chunk_no,
{chunks_[chunk_no].start.bytes + skip,
- chunks_[chunk_no].start.chars + skip,
- unibrow::Utf8::Utf8IncrementalBuffer(0)}};
+ chunks_[chunk_no].start.chars + skip, 0,
+ unibrow::Utf8::State::kAccept}};
} else {
current_ = {chunk_no, chunks_[chunk_no].start};
SkipToPosition(position);
@@ -491,7 +495,8 @@ size_t FindChunk(Chunks& chunks, ScriptCompiler::ExternalSourceStream* source,
// Get more data if needed. We usually won't enter the loop body.
bool out_of_data = !chunks.empty() && chunks.back().byte_length == 0;
{
- RuntimeCallTimerScope scope(stats, &RuntimeCallStats::GetMoreDataCallback);
+ RuntimeCallTimerScope scope(stats,
+ RuntimeCallCounterId::kGetMoreDataCallback);
while (!out_of_data && end_pos <= position + 1) {
const uint8_t* chunk = nullptr;
size_t len = source->GetMoreData(&chunk);
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 8030b93889..3152ab184e 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -174,31 +174,15 @@ bool Scanner::BookmarkScope::HasBeenApplied() {
return bookmark_ == kBookmarkWasApplied;
}
-// LineTerminator: 'JS_Line_Terminator' in point.properties
-// ES#sec-line-terminators lists exactly 4 code points:
-// LF (U+000A), CR (U+000D), LS(U+2028), PS(U+2029)
-bool Scanner::IsLineTerminator(uc32 c) {
- if (c == 0x000A || c == 0x000D) {
- return true;
- }
- if (c == 0x2028 || c == 0x2029) {
- ++use_counts_[v8::Isolate::UseCounterFeature::
- kLineOrParagraphSeparatorAsLineTerminator];
- return true;
- }
- return false;
-}
-
// ----------------------------------------------------------------------------
// Scanner
-Scanner::Scanner(UnicodeCache* unicode_cache, int* use_counts)
+Scanner::Scanner(UnicodeCache* unicode_cache)
: unicode_cache_(unicode_cache),
octal_pos_(Location::invalid()),
octal_message_(MessageTemplate::kNone),
found_html_comment_(false),
- allow_harmony_bigint_(false),
- use_counts_(use_counts) {}
+ allow_harmony_bigint_(false) {}
void Scanner::Initialize(Utf16CharacterStream* source, bool is_module) {
DCHECK_NOT_NULL(source);
@@ -257,7 +241,8 @@ uc32 Scanner::ScanUnlimitedLengthHexNumber(int max_value, int beg_pos) {
// Ensure that tokens can be stored in a byte.
STATIC_ASSERT(Token::NUM_TOKENS <= 0x100);
-// Table of one-character tokens, by character (0x00..0x7f only).
+// Table of one-character tokens, by character (0x00..0x7F only).
+// clang-format off
static const byte one_char_tokens[] = {
Token::ILLEGAL,
Token::ILLEGAL,
@@ -303,7 +288,7 @@ static const byte one_char_tokens[] = {
Token::RPAREN, // 0x29
Token::ILLEGAL,
Token::ILLEGAL,
- Token::COMMA, // 0x2c
+ Token::COMMA, // 0x2C
Token::ILLEGAL,
Token::ILLEGAL,
Token::ILLEGAL,
@@ -317,12 +302,12 @@ static const byte one_char_tokens[] = {
Token::ILLEGAL,
Token::ILLEGAL,
Token::ILLEGAL,
- Token::COLON, // 0x3a
- Token::SEMICOLON, // 0x3b
+ Token::COLON, // 0x3A
+ Token::SEMICOLON, // 0x3B
Token::ILLEGAL,
Token::ILLEGAL,
Token::ILLEGAL,
- Token::CONDITIONAL, // 0x3f
+ Token::CONDITIONAL, // 0x3F
Token::ILLEGAL,
Token::ILLEGAL,
Token::ILLEGAL,
@@ -350,9 +335,9 @@ static const byte one_char_tokens[] = {
Token::ILLEGAL,
Token::ILLEGAL,
Token::ILLEGAL,
- Token::LBRACK, // 0x5b
+ Token::LBRACK, // 0x5B
Token::ILLEGAL,
- Token::RBRACK, // 0x5d
+ Token::RBRACK, // 0x5D
Token::ILLEGAL,
Token::ILLEGAL,
Token::ILLEGAL,
@@ -382,13 +367,13 @@ static const byte one_char_tokens[] = {
Token::ILLEGAL,
Token::ILLEGAL,
Token::ILLEGAL,
- Token::LBRACE, // 0x7b
+ Token::LBRACE, // 0x7B
Token::ILLEGAL,
- Token::RBRACE, // 0x7d
- Token::BIT_NOT, // 0x7e
+ Token::RBRACE, // 0x7D
+ Token::BIT_NOT, // 0x7E
Token::ILLEGAL
};
-
+// clang-format on
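As context for the table above: single ASCII characters are classified by direct indexing, which is why Scanner::Next first checks c0_ <= 0x7F. A tiny standalone sketch of the same pattern (illustrative names only, not the V8 table):

#include <array>
#include <cstdint>

enum class Tok : uint8_t { kIllegal, kLParen, kRParen, kComma, kSemicolon };

constexpr std::array<Tok, 128> MakeOneCharTokens() {
  std::array<Tok, 128> t{};   // every entry defaults to kIllegal
  t['('] = Tok::kLParen;      // 0x28
  t[')'] = Tok::kRParen;      // 0x29
  t[','] = Tok::kComma;       // 0x2C
  t[';'] = Tok::kSemicolon;   // 0x3B
  return t;
}
constexpr auto kOneCharTokens = MakeOneCharTokens();

Tok Classify(int c) {
  // Only 0x00..0x7F have table entries; anything else takes the slow path.
  return static_cast<unsigned>(c) <= 0x7F ? kOneCharTokens[c] : Tok::kIllegal;
}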
Token::Value Scanner::Next() {
if (next_.token == Token::EOS) {
@@ -405,7 +390,7 @@ Token::Value Scanner::Next() {
}
has_line_terminator_before_next_ = false;
has_multiline_comment_before_next_ = false;
- if (static_cast<unsigned>(c0_) <= 0x7f) {
+ if (static_cast<unsigned>(c0_) <= 0x7F) {
Token::Value token = static_cast<Token::Value>(one_char_tokens[c0_]);
if (token != Token::ILLEGAL) {
int pos = source_pos();
@@ -457,7 +442,7 @@ Token::Value Scanner::SkipWhiteSpace() {
// Advance as long as character is a WhiteSpace or LineTerminator.
// Remember if the latter is the case.
- if (IsLineTerminator(c0_)) {
+ if (unibrow::IsLineTerminator(c0_)) {
has_line_terminator_before_next_ = true;
} else if (!unicode_cache_->IsWhiteSpace(c0_)) {
break;
@@ -514,7 +499,7 @@ Token::Value Scanner::SkipSingleLineComment() {
// separately by the lexical grammar and becomes part of the
// stream of input elements for the syntactic grammar (see
// ECMA-262, section 7.4).
- while (c0_ != kEndOfInput && !IsLineTerminator(c0_)) {
+ while (c0_ != kEndOfInput && !unibrow::IsLineTerminator(c0_)) {
Advance();
}
@@ -524,7 +509,7 @@ Token::Value Scanner::SkipSingleLineComment() {
Token::Value Scanner::SkipSourceURLComment() {
TryToParseSourceURLComment();
- while (c0_ != kEndOfInput && !IsLineTerminator(c0_)) {
+ while (c0_ != kEndOfInput && !unibrow::IsLineTerminator(c0_)) {
Advance();
}
@@ -560,7 +545,7 @@ void Scanner::TryToParseSourceURLComment() {
while (c0_ != kEndOfInput && unicode_cache_->IsWhiteSpace(c0_)) {
Advance();
}
- while (c0_ != kEndOfInput && !IsLineTerminator(c0_)) {
+ while (c0_ != kEndOfInput && !unibrow::IsLineTerminator(c0_)) {
// Disallowed characters.
if (c0_ == '"' || c0_ == '\'') {
value->Reset();
@@ -573,7 +558,7 @@ void Scanner::TryToParseSourceURLComment() {
Advance();
}
// Allow whitespace at the end.
- while (c0_ != kEndOfInput && !IsLineTerminator(c0_)) {
+ while (c0_ != kEndOfInput && !unibrow::IsLineTerminator(c0_)) {
if (!unicode_cache_->IsWhiteSpace(c0_)) {
value->Reset();
break;
@@ -590,7 +575,7 @@ Token::Value Scanner::SkipMultiLineComment() {
while (c0_ != kEndOfInput) {
uc32 ch = c0_;
Advance();
- if (c0_ != kEndOfInput && IsLineTerminator(ch)) {
+ if (c0_ != kEndOfInput && unibrow::IsLineTerminator(ch)) {
// Following ECMA-262, section 7.4, a comment containing
// a newline will make the comment count as a line-terminator.
has_multiline_comment_before_next_ = true;
@@ -875,6 +860,10 @@ void Scanner::Scan() {
token = ScanTemplateStart();
break;
+ case '#':
+ token = ScanPrivateName();
+ break;
+
default:
if (c0_ == kEndOfInput) {
token = Token::EOS;
@@ -940,6 +929,7 @@ void Scanner::SanityCheckTokenDesc(const TokenDesc& token) const {
case Token::REGEXP_LITERAL:
case Token::SMI:
case Token::STRING:
+ case Token::PRIVATE_NAME:
DCHECK_NOT_NULL(token.literal_chars);
DCHECK_NULL(token.raw_literal_chars);
DCHECK_EQ(token.invalid_template_escape_message, MessageTemplate::kNone);
@@ -987,7 +977,8 @@ bool Scanner::ScanEscape() {
Advance<capture_raw>();
// Skip escaped newlines.
- if (!in_template_literal && c0_ != kEndOfInput && IsLineTerminator(c)) {
+ if (!in_template_literal && c0_ != kEndOfInput &&
+ unibrow::IsLineTerminator(c)) {
// Allow escaped CR+LF newlines in multiline string literals.
if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance<capture_raw>();
return true;
@@ -1080,7 +1071,8 @@ Token::Value Scanner::ScanString() {
AddLiteralChar(c);
}
- while (c0_ != quote && c0_ != kEndOfInput && !IsLineTerminator(c0_)) {
+ while (c0_ != quote && c0_ != kEndOfInput &&
+ !unibrow::IsLineTerminator(c0_)) {
uc32 c = c0_;
Advance();
if (c == '\\') {
@@ -1098,6 +1090,26 @@ Token::Value Scanner::ScanString() {
return Token::STRING;
}
+Token::Value Scanner::ScanPrivateName() {
+ if (!allow_harmony_private_fields()) {
+ ReportScannerError(source_pos(),
+ MessageTemplate::kInvalidOrUnexpectedToken);
+ return Token::ILLEGAL;
+ }
+
+ LiteralScope literal(this);
+ DCHECK_EQ(c0_, '#');
+ AddLiteralCharAdvance();
+ if (c0_ == kEndOfInput || !unicode_cache_->IsIdentifierStart(c0_)) {
+ PushBack(c0_);
+ ReportScannerError(source_pos(),
+ MessageTemplate::kInvalidOrUnexpectedToken);
+ return Token::ILLEGAL;
+ }
+
+ Token::Value token = ScanIdentifierOrKeywordInner(&literal);
+ return token == Token::ILLEGAL ? Token::ILLEGAL : Token::PRIVATE_NAME;
+}
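ScanPrivateName above requires that '#' be immediately followed by an identifier start and otherwise reports an invalid-token error. An ASCII-only standalone sketch of that check (not the V8 scanner; names are illustrative):

#include <cctype>
#include <string>

// Returns true and stores "#name" in *out if src[*pos] starts a private name.
bool ScanPrivateName(const std::string& src, size_t* pos, std::string* out) {
  size_t i = *pos;
  if (i >= src.size() || src[i] != '#') return false;
  size_t start = i++;
  auto is_start = [](char c) {
    return std::isalpha(static_cast<unsigned char>(c)) || c == '_' || c == '$';
  };
  auto is_part = [](char c) {
    return std::isalnum(static_cast<unsigned char>(c)) || c == '_' || c == '$';
  };
  if (i >= src.size() || !is_start(src[i])) return false;  // '#' alone: error
  while (i < src.size() && is_part(src[i])) ++i;
  *out = src.substr(start, i - start);  // keeps the leading '#'
  *pos = i;
  return true;
}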
Token::Value Scanner::ScanTemplateSpan() {
// When scanning a TemplateSpan, we are looking for the following construct:
@@ -1136,7 +1148,7 @@ Token::Value Scanner::ScanTemplateSpan() {
ReduceRawLiteralLength(2);
break;
} else if (c == '\\') {
- if (c0_ != kEndOfInput && IsLineTerminator(c0_)) {
+ if (c0_ != kEndOfInput && unibrow::IsLineTerminator(c0_)) {
// The TV of LineContinuation :: \ LineTerminatorSequence is the empty
// code unit sequence.
uc32 lastChar = c0_;
@@ -1397,7 +1409,7 @@ uc32 Scanner::ScanUnicodeEscape() {
if (c0_ == '{') {
int begin = source_pos() - 2;
Advance<capture_raw>();
- uc32 cp = ScanUnlimitedLengthHexNumber<capture_raw>(0x10ffff, begin);
+ uc32 cp = ScanUnlimitedLengthHexNumber<capture_raw>(0x10FFFF, begin);
if (cp < 0 || c0_ != '}') {
ReportScannerError(source_pos(),
MessageTemplate::kInvalidUnicodeEscapeSequence);
@@ -1541,10 +1553,13 @@ static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
return Token::IDENTIFIER;
}
-
Token::Value Scanner::ScanIdentifierOrKeyword() {
- DCHECK(unicode_cache_->IsIdentifierStart(c0_));
LiteralScope literal(this);
+ return ScanIdentifierOrKeywordInner(&literal);
+}
+
+Token::Value Scanner::ScanIdentifierOrKeywordInner(LiteralScope* literal) {
+ DCHECK(unicode_cache_->IsIdentifierStart(c0_));
if (IsInRange(c0_, 'a', 'z') || c0_ == '_') {
do {
char first_char = static_cast<char>(c0_);
@@ -1564,7 +1579,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
AddLiteralChar(first_char);
}
if (c0_ <= kMaxAscii && c0_ != '\\') {
- literal.Complete();
+ literal->Complete();
return Token::IDENTIFIER;
}
} else if (c0_ <= kMaxAscii && c0_ != '\\') {
@@ -1575,7 +1590,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
if (token == Token::IDENTIFIER ||
token == Token::FUTURE_STRICT_RESERVED_WORD ||
Token::IsContextualKeyword(token))
- literal.Complete();
+ literal->Complete();
return token;
}
@@ -1588,7 +1603,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
} while (IsAsciiIdentifier(c0_));
if (c0_ <= kMaxAscii && c0_ != '\\') {
- literal.Complete();
+ literal->Complete();
return Token::IDENTIFIER;
}
@@ -1603,7 +1618,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
return Token::ILLEGAL;
}
AddLiteralChar(c);
- return ScanIdentifierSuffix(&literal, true);
+ return ScanIdentifierSuffix(literal, true);
} else {
uc32 first_char = c0_;
Advance();
@@ -1619,7 +1634,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
continue;
}
// Fallthrough if no longer able to complete keyword.
- return ScanIdentifierSuffix(&literal, false);
+ return ScanIdentifierSuffix(literal, false);
}
if (next_.literal_chars->is_one_byte()) {
@@ -1629,10 +1644,10 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
if (token == Token::IDENTIFIER ||
token == Token::FUTURE_STRICT_RESERVED_WORD ||
Token::IsContextualKeyword(token))
- literal.Complete();
+ literal->Complete();
return token;
}
- literal.Complete();
+ literal->Complete();
return Token::IDENTIFIER;
}
@@ -1697,12 +1712,12 @@ bool Scanner::ScanRegExpPattern() {
}
while (c0_ != '/' || in_character_class) {
- if (c0_ == kEndOfInput || IsLineTerminator(c0_)) {
+ if (c0_ == kEndOfInput || unibrow::IsLineTerminator(c0_)) {
return false;
}
if (c0_ == '\\') { // Escape sequence.
AddLiteralCharAdvance();
- if (c0_ == kEndOfInput || IsLineTerminator(c0_)) {
+ if (c0_ == kEndOfInput || unibrow::IsLineTerminator(c0_)) {
return false;
}
AddLiteralCharAdvance();
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 08d77c686b..f5106990ff 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -207,7 +207,7 @@ class Scanner {
static const int kNoOctalLocation = -1;
static const uc32 kEndOfInput = Utf16CharacterStream::kEndOfInput;
- explicit Scanner(UnicodeCache* scanner_contants, int* use_counts_);
+ explicit Scanner(UnicodeCache* scanner_contants);
void Initialize(Utf16CharacterStream* source, bool is_module);
@@ -360,6 +360,12 @@ class Scanner {
bool allow_harmony_bigint() const { return allow_harmony_bigint_; }
void set_allow_harmony_bigint(bool allow) { allow_harmony_bigint_ = allow; }
+ bool allow_harmony_private_fields() const {
+ return allow_harmony_private_fields_;
+ }
+ void set_allow_harmony_private_fields(bool allow) {
+ allow_harmony_private_fields_ = allow;
+ }
private:
// Scoped helper for saving & restoring scanner error state.
@@ -717,9 +723,11 @@ class Scanner {
void ScanDecimalDigits();
Token::Value ScanNumber(bool seen_period);
Token::Value ScanIdentifierOrKeyword();
+ Token::Value ScanIdentifierOrKeywordInner(LiteralScope* literal);
Token::Value ScanIdentifierSuffix(LiteralScope* literal, bool escaped);
Token::Value ScanString();
+ Token::Value ScanPrivateName();
// Scans an escape-sequence which is part of a string and adds the
// decoded character to the current literal. Returns true if a pattern
@@ -736,8 +744,6 @@ class Scanner {
bool is_module_;
- bool IsLineTerminator(uc32 c);
-
Token::Value ScanTemplateSpan();
// Return the current source position.
@@ -802,10 +808,9 @@ class Scanner {
// Whether this scanner encountered an HTML comment.
bool found_html_comment_;
- // Whether to recognize BIGINT tokens.
+ // Harmony flags to allow ESNext features.
bool allow_harmony_bigint_;
-
- int* use_counts_;
+ bool allow_harmony_private_fields_;
MessageTemplate::Template scanner_error_;
Location scanner_error_location_;
diff --git a/deps/v8/src/parsing/token.h b/deps/v8/src/parsing/token.h
index e4a4a5e587..07974edf41 100644
--- a/deps/v8/src/parsing/token.h
+++ b/deps/v8/src/parsing/token.h
@@ -151,6 +151,7 @@ namespace internal {
\
/* Identifiers (not keywords or future reserved words). */ \
T(IDENTIFIER, nullptr, 0) \
+ T(PRIVATE_NAME, nullptr, 0) \
\
/* Future reserved words (ECMA-262, section 7.6.1.2). */ \
T(FUTURE_STRICT_RESERVED_WORD, nullptr, 0) \
diff --git a/deps/v8/src/perf-jit.cc b/deps/v8/src/perf-jit.cc
index c52bb5222a..7ccd02ef9b 100644
--- a/deps/v8/src/perf-jit.cc
+++ b/deps/v8/src/perf-jit.cc
@@ -87,7 +87,7 @@ struct PerfJitDebugEntry {
uint64_t address_;
int line_number_;
int column_;
- // Followed by null-terminated name or \0xff\0 if same as previous.
+ // Followed by null-terminated name or \0xFF\0 if same as previous.
};
struct PerfJitCodeDebugInfo : PerfJitBase {
@@ -396,7 +396,7 @@ void PerfJitLogger::LogWriteHeader() {
header.version_ = PerfJitHeader::kVersion;
header.size_ = sizeof(header);
header.elf_mach_target_ = GetElfMach();
- header.reserved_ = 0xdeadbeef;
+ header.reserved_ = 0xDEADBEEF;
header.process_id_ = base::OS::GetCurrentProcessId();
header.time_stamp_ =
static_cast<uint64_t>(V8::GetCurrentPlatform()->CurrentClockTimeMillis() *
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index d9b12ac8db..451a1afa46 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -60,9 +60,9 @@ void RelocInfo::apply(intptr_t delta) {
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
- Address target = Assembler::target_address_at(pc_, host_);
- Assembler::set_target_address_at(nullptr, pc_, host_, target + delta,
- SKIP_ICACHE_FLUSH);
+ Address target = Assembler::target_address_at(pc_, constant_pool_);
+ Assembler::set_target_address_at(nullptr, pc_, constant_pool_,
+ target + delta, SKIP_ICACHE_FLUSH);
}
}
@@ -74,7 +74,7 @@ Address RelocInfo::target_internal_reference() {
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
}
@@ -87,7 +87,7 @@ Address RelocInfo::target_internal_reference_address() {
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
@@ -117,12 +117,11 @@ Address RelocInfo::target_address_address() {
Address RelocInfo::constant_pool_entry_address() {
if (FLAG_enable_embedded_constant_pool) {
- Address constant_pool = host_->constant_pool();
- DCHECK(constant_pool);
+ DCHECK(constant_pool_);
ConstantPoolEntry::Access access;
if (Assembler::IsConstantPoolLoadStart(pc_, &access))
return Assembler::target_constant_pool_address_at(
- pc_, constant_pool, access, ConstantPoolEntry::INTPTR);
+ pc_, constant_pool_, access, ConstantPoolEntry::INTPTR);
}
UNREACHABLE();
}
@@ -130,18 +129,6 @@ Address RelocInfo::constant_pool_entry_address() {
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
-Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- return target_address_at(pc, constant_pool);
-}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
-}
-
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
@@ -176,21 +163,21 @@ Address Assembler::return_address_from_call_start(Address pc) {
HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(
- reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
+ return HeapObject::cast(reinterpret_cast<Object*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<HeapObject>(
- reinterpret_cast<HeapObject**>(Assembler::target_address_at(pc_, host_)));
+ return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
+ Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -203,7 +190,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -230,10 +217,10 @@ void RelocInfo::WipeOut(Isolate* isolate) {
} else if (IsInternalReferenceEncoded(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
- Assembler::set_target_address_at(isolate, pc_, host_, nullptr,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr,
SKIP_ICACHE_FLUSH);
} else {
- Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
}
}
@@ -428,15 +415,15 @@ Address Assembler::target_constant_pool_address_at(
// There is a FIXED_SEQUENCE assumption here
void Assembler::deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code, Address target) {
- set_target_address_at(isolate, instruction_payload, code, target);
+ set_target_address_at(isolate, instruction_payload,
+ code ? code->constant_pool() : nullptr, target);
}
void Assembler::deserialization_set_target_internal_reference_at(
Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
- Code* code = nullptr;
- set_target_address_at(isolate, pc, code, target, SKIP_ICACHE_FLUSH);
+ set_target_address_at(isolate, pc, nullptr, target, SKIP_ICACHE_FLUSH);
} else {
Memory::Address_at(pc) = target;
}
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 0c4a518772..90b18b02ba 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -155,30 +155,30 @@ bool RelocInfo::IsCodedSpecially() {
bool RelocInfo::IsInConstantPool() {
- if (FLAG_enable_embedded_constant_pool && host_ != nullptr) {
- Address constant_pool = host_->constant_pool();
- return (constant_pool && Assembler::IsConstantPoolLoadStart(pc_));
+ if (FLAG_enable_embedded_constant_pool && constant_pool_ != nullptr) {
+ return (constant_pool_ && Assembler::IsConstantPoolLoadStart(pc_));
}
return false;
}
Address RelocInfo::embedded_address() const {
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
uint32_t RelocInfo::embedded_size() const {
- return static_cast<uint32_t>(
- reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
+ return static_cast<uint32_t>(reinterpret_cast<intptr_t>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
+ flush_mode);
}
void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
@@ -350,9 +350,9 @@ bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
// 798c07c6 rldicr r12, r12, 32, 31
// 658c00c3 oris r12, r12, 195
// 618ccd40 ori r12, r12, 52544
- return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c) &&
- (instr3 == 0x798c07c6) && ((instr4 >> 16) == 0x658c) &&
- ((instr5 >> 16) == 0x618c));
+ return (((instr1 >> 16) == 0x3D80) && ((instr2 >> 16) == 0x618C) &&
+ (instr3 == 0x798C07C6) && ((instr4 >> 16) == 0x658C) &&
+ ((instr5 >> 16) == 0x618C));
}
#else
// This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori)
@@ -360,7 +360,7 @@ bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
// Check the instruction is indeed a two part load (into r12)
// 3d802553 lis r12, 9555
// 618c5000 ori r12, r12, 20480
- return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c));
+ return (((instr1 >> 16) == 0x3D80) && ((instr2 >> 16) == 0x618C));
}
#endif
@@ -513,8 +513,8 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
case kUnboundAddLabelOffsetOpcode: {
// dst = base + position + immediate
Instr operands = instr_at(pos + kInstrSize);
- Register dst = Register::from_code((operands >> 21) & 0x1f);
- Register base = Register::from_code((operands >> 16) & 0x1f);
+ Register dst = Register::from_code((operands >> 21) & 0x1F);
+ Register base = Register::from_code((operands >> 16) & 0x1F);
int32_t offset = target_pos + SIGN_EXT_IMM16(operands & kImm16Mask);
PatchingAssembler patcher(isolate_data(),
reinterpret_cast<byte*>(buffer_ + pos), 2);
@@ -660,9 +660,9 @@ void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
int maskbit, RCBit r) {
- int sh0_4 = shift & 0x1f;
+ int sh0_4 = shift & 0x1F;
int sh5 = (shift >> 5) & 0x1;
- int m0_4 = maskbit & 0x1f;
+ int m0_4 = maskbit & 0x1F;
int m5 = (maskbit >> 5) & 0x1;
emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
@@ -672,7 +672,7 @@ void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
int maskbit, RCBit r) {
- int m0_4 = maskbit & 0x1f;
+ int m0_4 = maskbit & 0x1F;
int m5 = (maskbit >> 5) & 0x1;
emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
@@ -766,9 +766,9 @@ void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
RCBit rc) {
- sh &= 0x1f;
- mb &= 0x1f;
- me &= 0x1f;
+ sh &= 0x1F;
+ mb &= 0x1F;
+ me &= 0x1F;
emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
me << 1 | rc);
}
@@ -776,8 +776,8 @@ void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
RCBit rc) {
- mb &= 0x1f;
- me &= 0x1f;
+ mb &= 0x1F;
+ me &= 0x1F;
emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
me << 1 | rc);
}
@@ -785,9 +785,9 @@ void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
RCBit rc) {
- sh &= 0x1f;
- mb &= 0x1f;
- me &= 0x1f;
+ sh &= 0x1F;
+ mb &= 0x1F;
+ me &= 0x1F;
emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
me << 1 | rc);
}
@@ -1191,7 +1191,7 @@ void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
- int sh0_4 = sh & 0x1f;
+ int sh0_4 = sh & 0x1F;
int sh5 = (sh >> 5) & 0x1;
emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
@@ -1368,19 +1368,19 @@ void Assembler::mov(Register dst, const Operand& src) {
li(dst, Operand(value >> 32));
} else {
lis(dst, Operand(value >> 48));
- u16 = ((value >> 32) & 0xffff);
+ u16 = ((value >> 32) & 0xFFFF);
if (u16) {
ori(dst, dst, Operand(u16));
}
}
sldi(dst, dst, Operand(32));
- u16 = ((value >> 16) & 0xffff);
+ u16 = ((value >> 16) & 0xFFFF);
if (u16) {
oris(dst, dst, Operand(u16));
}
}
#endif
- u16 = (value & 0xffff);
+ u16 = (value & 0xFFFF);
if (u16) {
ori(dst, dst, Operand(u16));
}
@@ -1402,17 +1402,17 @@ void Assembler::bitwise_mov(Register dst, intptr_t value) {
int32_t hi_32 = static_cast<int32_t>(value >> 32);
int32_t lo_32 = static_cast<int32_t>(value);
int hi_word = static_cast<int>(hi_32 >> 16);
- int lo_word = static_cast<int>(hi_32 & 0xffff);
+ int lo_word = static_cast<int>(hi_32 & 0xFFFF);
lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
ori(dst, dst, Operand(lo_word));
sldi(dst, dst, Operand(32));
- hi_word = static_cast<int>(((lo_32 >> 16) & 0xffff));
- lo_word = static_cast<int>(lo_32 & 0xffff);
+ hi_word = static_cast<int>(((lo_32 >> 16) & 0xFFFF));
+ lo_word = static_cast<int>(lo_32 & 0xFFFF);
oris(dst, dst, Operand(hi_word));
ori(dst, dst, Operand(lo_word));
#else
int hi_word = static_cast<int>(value >> 16);
- int lo_word = static_cast<int>(value & 0xffff);
+ int lo_word = static_cast<int>(value & 0xFFFF);
lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
ori(dst, dst, Operand(lo_word));
#endif
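bitwise_mov above materializes a 64-bit constant from 16-bit pieces (lis/ori for the high word, a 32-bit shift, then oris/ori for the low word). The arithmetic it performs is equivalent to this plain C++ sketch (not PPC code):

#include <cstdint>

uint64_t MaterializeConstant(uint64_t value) {
  uint64_t hi_hi = (value >> 48) & 0xFFFF;
  uint64_t hi_lo = (value >> 32) & 0xFFFF;
  uint64_t lo_hi = (value >> 16) & 0xFFFF;
  uint64_t lo_lo = value & 0xFFFF;
  uint64_t r = (hi_hi << 16) | hi_lo;  // lis + ori
  r <<= 32;                            // sldi
  r |= (lo_hi << 16) | lo_lo;          // oris + ori
  return r;                            // == value
}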
@@ -1422,7 +1422,7 @@ void Assembler::bitwise_mov(Register dst, intptr_t value) {
void Assembler::bitwise_mov32(Register dst, int32_t value) {
BlockTrampolinePoolScope block_trampoline_pool(this);
int hi_word = static_cast<int>(value >> 16);
- int lo_word = static_cast<int>(value & 0xffff);
+ int lo_word = static_cast<int>(value & 0xFFFF);
lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
ori(dst, dst, Operand(lo_word));
}
@@ -1435,7 +1435,7 @@ void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
nop();
} else {
int hi_word = static_cast<int>(value >> 16);
- int lo_word = static_cast<int>(value & 0xffff);
+ int lo_word = static_cast<int>(value & 0xFFFF);
if (lo_word & 0x8000) hi_word++;
addis(dst, src, Operand(SIGN_EXT_IMM16(hi_word)));
addic(dst, dst, Operand(SIGN_EXT_IMM16(lo_word)));
@@ -1636,9 +1636,7 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code,
}
}
-
-void Assembler::bkpt(uint32_t imm16) { emit(0x7d821008); }
-
+void Assembler::bkpt(uint32_t imm16) { emit(0x7D821008); }
void Assembler::dcbf(Register ra, Register rb) {
emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
@@ -2085,8 +2083,7 @@ void Assembler::EmitRelocations() {
it != relocations_.end(); it++) {
RelocInfo::Mode rmode = it->rmode();
Address pc = buffer_ + it->position();
- Code* code = nullptr;
- RelocInfo rinfo(pc, rmode, it->data(), code);
+ RelocInfo rinfo(pc, rmode, it->data(), nullptr);
// Fix up internal references now that they are guaranteed to be bound.
if (RelocInfo::IsInternalReference(rmode)) {
@@ -2095,8 +2092,8 @@ void Assembler::EmitRelocations() {
Memory::Address_at(pc) = buffer_ + pos;
} else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
// mov sequence
- intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, code));
- set_target_address_at(nullptr, pc, code, buffer_ + pos,
+ intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, nullptr));
+ set_target_address_at(nullptr, pc, nullptr, buffer_ + pos,
SKIP_ICACHE_FLUSH);
}
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 77c1422424..0204d65fa5 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -303,6 +303,7 @@ constexpr Register kConstantPoolRegister = r28; // Constant pool.
constexpr Register kRootRegister = r29; // Roots array pointer.
constexpr Register cp = r30; // JavaScript context pointer.
+constexpr bool kPadArguments = false;
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
@@ -582,10 +583,6 @@ class Assembler : public AssemblerBase {
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
- INLINE(static Address target_address_at(Address pc, Code* code));
- INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index d5af6bfec0..5c3d38786f 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -129,8 +129,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// scratch_high LSR 31 equals zero.
// New result = (result eor 0) + 0 = result.
// If the input was negative, we have to negate the result.
- // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
- // New result = (result eor 0xffffffff) + 1 = 0 - result.
+ // Input_high ASR 31 equals 0xFFFFFFFF and scratch_high LSR 31 equals 1.
+ // New result = (result eor 0xFFFFFFFF) + 1 = 0 - result.
__ srawi(r0, scratch_high, 31);
#if V8_TARGET_ARCH_PPC64
__ srdi(r0, r0, Operand(32));
@@ -489,6 +489,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Set up the reserved register for 0.0.
__ LoadDoubleLiteral(kDoubleRegZero, Double(0.0), r0);
+ __ InitializeRootRegister();
+
// Push a frame with special values setup to mark it as an entry frame.
// r3: code entry
// r4: function
@@ -566,12 +568,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// r5: receiver
// r6: argc
// r7: argv
- if (type() == StackFrame::CONSTRUCT_ENTRY) {
- __ Call(BUILTIN_CODE(isolate(), JSConstructEntryTrampoline),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(BUILTIN_CODE(isolate(), JSEntryTrampoline), RelocInfo::CODE_TARGET);
- }
+ __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@@ -763,7 +760,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -804,7 +801,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
if (FLAG_debug_code) {
__ LoadP(r8, FieldMemOperand(r5, 0));
__ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, kExpectedAllocationSite);
+ __ Assert(eq, AbortReason::kExpectedAllocationSite);
}
// Save the resulting elements kind in type info. We can't just store r6
@@ -831,7 +828,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -906,9 +903,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ TestIfSmi(r7, r0);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r7, r7, r8, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
// We should either have undefined in r5 or a valid AllocationSite
__ AssertUndefinedOrAllocationSite(r5, r7);
@@ -987,9 +984,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ TestIfSmi(r6, r0);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r6, r6, r7, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
// Figure out the right elements kind
@@ -1004,7 +1001,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ cmpi(r6, Operand(PACKED_ELEMENTS));
__ beq(&done);
__ cmpi(r6, Operand(HOLEY_ELEMENTS));
- __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ Assert(
+ eq,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
@@ -1115,7 +1114,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
if (__ emit_debug_code()) {
__ lwz(r4, MemOperand(r17, kLevelOffset));
__ cmp(r4, r16);
- __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
}
__ subi(r16, r16, Operand(1));
__ stw(r16, MemOperand(r17, kLevelOffset));
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index 13c9af7e22..4641dc260c 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -20,8 +20,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -41,8 +40,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 9c4fe5fd6a..069fcb26ad 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -43,8 +43,6 @@ const Register LoadDescriptor::SlotRegister() { return r3; }
const Register LoadWithVectorDescriptor::VectorRegister() { return r6; }
-const Register LoadICProtoArrayDescriptor::HandlerRegister() { return r7; }
-
const Register StoreDescriptor::ReceiverRegister() { return r4; }
const Register StoreDescriptor::NameRegister() { return r5; }
const Register StoreDescriptor::ValueRegister() { return r3; }
@@ -202,6 +200,11 @@ void TransitionElementsKindDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void AbortJSDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 75e176c09c..8d7c3d05b4 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -445,7 +445,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (emit_debug_code()) {
LoadP(r0, MemOperand(address));
cmp(r0, value);
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
if (remembered_set_action == OMIT_REMEMBERED_SET &&
@@ -694,7 +694,7 @@ void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
cmpi(shift, Operand(32));
blt(&less_than_32);
// If shift >= 32
- andi(scratch, shift, Operand(0x1f));
+ andi(scratch, shift, Operand(0x1F));
slw(dst_high, src_low, scratch);
li(dst_low, Operand::Zero());
b(&done);
@@ -717,7 +717,7 @@ void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
Move(dst_high, src_low);
li(dst_low, Operand::Zero());
} else if (shift > 32) {
- shift &= 0x1f;
+ shift &= 0x1F;
slwi(dst_high, src_low, Operand(shift));
li(dst_low, Operand::Zero());
} else if (shift == 0) {
@@ -741,7 +741,7 @@ void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
cmpi(shift, Operand(32));
blt(&less_than_32);
// If shift >= 32
- andi(scratch, shift, Operand(0x1f));
+ andi(scratch, shift, Operand(0x1F));
srw(dst_low, src_high, scratch);
li(dst_high, Operand::Zero());
b(&done);
@@ -764,7 +764,7 @@ void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
Move(dst_low, src_high);
li(dst_high, Operand::Zero());
} else if (shift > 32) {
- shift &= 0x1f;
+ shift &= 0x1F;
srwi(dst_low, src_high, Operand(shift));
li(dst_high, Operand::Zero());
} else if (shift == 0) {
@@ -787,7 +787,7 @@ void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
cmpi(shift, Operand(32));
blt(&less_than_32);
// If shift >= 32
- andi(scratch, shift, Operand(0x1f));
+ andi(scratch, shift, Operand(0x1F));
sraw(dst_low, src_high, scratch);
srawi(dst_high, src_high, 31);
b(&done);
@@ -810,7 +810,7 @@ void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
Move(dst_low, src_high);
srawi(dst_high, src_high, 31);
} else if (shift > 32) {
- shift &= 0x1f;
+ shift &= 0x1F;
srawi(dst_low, src_high, shift);
srawi(dst_high, src_high, 31);
} else if (shift == 0) {
@@ -1034,6 +1034,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
LoadP(cp, MemOperand(ip));
#ifdef DEBUG
+ mov(r6, Operand(Context::kInvalidContext));
mov(ip,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
StoreP(r6, MemOperand(ip));
@@ -1091,7 +1092,7 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
if (FLAG_debug_code) {
cmpl(src_reg, dst_reg);
- Check(lt, kStackAccessBelowStackPointer);
+ Check(lt, AbortReason::kStackAccessBelowStackPointer);
}
// Restore caller's frame pointer and return address now as they will be
@@ -1327,9 +1328,11 @@ void MacroAssembler::MaybeDropFrames() {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ Push(Smi::kZero); // Padding.
+
// Link the current handler as the next handler.
// Preserve r3-r7.
mov(r8,
@@ -1343,13 +1346,15 @@ void MacroAssembler::PushStackHandler() {
void MacroAssembler::PopStackHandler() {
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(r4);
mov(ip,
Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
StoreP(r4, MemOperand(ip));
+
+ Drop(1); // Drop padding.
}
@@ -1365,7 +1370,7 @@ void MacroAssembler::CompareObjectType(Register object, Register map,
void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
InstanceType type) {
STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
- STATIC_ASSERT(LAST_TYPE <= 0xffff);
+ STATIC_ASSERT(LAST_TYPE <= 0xFFFF);
lhz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
cmpi(type_reg, Operand(type));
}
@@ -1645,12 +1650,12 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
}
-void TurboAssembler::Assert(Condition cond, BailoutReason reason,
+void TurboAssembler::Assert(Condition cond, AbortReason reason,
CRegister cr) {
if (emit_debug_code()) Check(cond, reason, cr);
}
-void TurboAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
+void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
Label L;
b(cond, &L, cr);
Abort(reason);
@@ -1658,7 +1663,7 @@ void TurboAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
bind(&L);
}
-void TurboAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
@@ -1713,7 +1718,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
- Check(ne, kOperandIsASmi, cr0);
+ Check(ne, AbortReason::kOperandIsASmi, cr0);
}
}
@@ -1722,7 +1727,7 @@ void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
- Check(eq, kOperandIsNotSmi, cr0);
+ Check(eq, AbortReason::kOperandIsNotASmi, cr0);
}
}
@@ -1730,11 +1735,11 @@ void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
- Check(ne, kOperandIsASmiAndNotAFixedArray, cr0);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray, cr0);
push(object);
CompareObjectType(object, object, object, FIXED_ARRAY_TYPE);
pop(object);
- Check(eq, kOperandIsNotAFixedArray);
+ Check(eq, AbortReason::kOperandIsNotAFixedArray);
}
}
@@ -1742,11 +1747,11 @@ void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
- Check(ne, kOperandIsASmiAndNotAFunction, cr0);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
push(object);
CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
pop(object);
- Check(eq, kOperandIsNotAFunction);
+ Check(eq, AbortReason::kOperandIsNotAFunction);
}
}
@@ -1755,18 +1760,18 @@ void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
- Check(ne, kOperandIsASmiAndNotABoundFunction, cr0);
+ Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
push(object);
CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
pop(object);
- Check(eq, kOperandIsNotABoundFunction);
+ Check(eq, AbortReason::kOperandIsNotABoundFunction);
}
}
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
TestIfSmi(object, r0);
- Check(ne, kOperandIsASmiAndNotAGeneratorObject, cr0);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
// Load map
Register map = object;
@@ -1785,7 +1790,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
bind(&do_check);
// Restore generator object to register and perform assertion
pop(object);
- Check(eq, kOperandIsNotAGeneratorObject);
+ Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
@@ -1797,7 +1802,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
beq(&done_checking);
LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
- Assert(eq, kExpectedUndefinedOrCell);
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
}
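
PushStackHandler/PopStackHandler above grow the handler from one to two pointer slots: a padding word (Smi::kZero) is pushed before the link to the previous handler and dropped again on pop. A host-side toy model of the new layout follows; the alignment motivation and the surrounding types are assumptions for illustration, not V8 internals.

    // Toy model of the two-slot stack handler used above (illustration only).
    #include <cstdint>
    #include <vector>

    constexpr int kPointerSize = sizeof(intptr_t);
    constexpr int kStackHandlerSize = 2 * kPointerSize;  // mirrors the STATIC_ASSERT

    struct SimulatedStack {
      std::vector<intptr_t> slots;
      intptr_t handler_chain = 0;  // models IsolateAddressId::kHandlerAddress

      void PushStackHandler() {
        slots.push_back(0);              // padding word (Smi::kZero in the patch)
        slots.push_back(handler_chain);  // link the previous handler
        handler_chain = static_cast<intptr_t>(slots.size() - 1);
      }

      void PopStackHandler() {
        handler_chain = slots.back();    // pop the link, restore previous handler
        slots.pop_back();
        slots.pop_back();                // Drop(1): discard the padding word
      }
    };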
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index c508ae128a..c67ef4ab90 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -404,13 +404,13 @@ class TurboAssembler : public Assembler {
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
+ void Assert(Condition cond, AbortReason reason, CRegister cr = cr7);
// Like Assert(), but always enabled.
- void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
+ void Check(Condition cond, AbortReason reason, CRegister cr = cr7);
// Print a message to stdout and abort execution.
- void Abort(BailoutReason reason);
+ void Abort(AbortReason reason);
inline bool AllowThisStubCall(CodeStub* stub);
#if !V8_TARGET_ARCH_PPC64
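
The signature changes above are part of the repo-wide move from loose BailoutReason constants to the scoped AbortReason enum. As a reminder of the contract (Assert only checks when debug code is emitted via --debug-code; Check always fires), here is a minimal standalone model with stand-in reason values:

    // Minimal host-side model of the Assert/Check/Abort contract above.
    // AbortReason values and emit_debug_code() are stand-ins, not V8's.
    #include <cstdio>
    #include <cstdlib>

    enum class AbortReason {
      kOperandIsASmi,
      kOperandIsNotASmi,
      kUnexpectedInitialMapForArrayFunction,
    };

    bool emit_debug_code() { return true; }  // models the --debug-code flag

    void Abort(AbortReason reason) {
      std::fprintf(stderr, "abort: reason %d\n", static_cast<int>(reason));
      std::abort();
    }

    // Always enabled: aborts when the condition does not hold.
    void Check(bool cond, AbortReason reason) {
      if (!cond) Abort(reason);
    }

    // Emitted only when debug code is on; otherwise a no-op.
    void Assert(bool cond, AbortReason reason) {
      if (emit_debug_code()) Check(cond, reason);
    }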
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index ff62c4a56e..a92e5363ea 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -47,7 +47,7 @@ class PPCDebugger {
void Debug();
private:
- static const Instr kBreakpointInstr = (TWI | 0x1f * B21);
+ static const Instr kBreakpointInstr = (TWI | 0x1F * B21);
static const Instr kNopInstr = (ORI); // ori, 0,0,0
Simulator* sim_;
@@ -232,7 +232,7 @@ void PPCDebugger::Debug() {
// If at a breakpoint, proceed past it.
if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
- ->InstructionBits() == 0x7d821008) {
+ ->InstructionBits() == 0x7D821008) {
sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
} else {
sim_->ExecuteInstruction(
@@ -256,7 +256,7 @@ void PPCDebugger::Debug() {
} else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
// If at a breakpoint, proceed past it.
if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
- ->InstructionBits() == 0x7d821008) {
+ ->InstructionBits() == 0x7D821008) {
sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
} else {
// Execute the one instruction we broke at with breakpoints disabled.
@@ -314,7 +314,7 @@ void PPCDebugger::Debug() {
PrintF("%3s: %f 0x%08x %08x\n",
GetRegConfig()->GetDoubleRegisterName(i), dvalue,
static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xffffffff));
+ static_cast<uint32_t>(as_words & 0xFFFFFFFF));
}
} else if (arg1[0] == 'r' &&
(arg1[1] >= '0' && arg1[1] <= '9' &&
@@ -336,7 +336,7 @@ void PPCDebugger::Debug() {
uint64_t as_words = bit_cast<uint64_t>(dvalue);
PrintF("%s: %f 0x%08x %08x\n", arg1, dvalue,
static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xffffffff));
+ static_cast<uint32_t>(as_words & 0xFFFFFFFF));
} else {
PrintF("%s unrecognized\n", arg1);
}
@@ -664,6 +664,10 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
+void Simulator::SetRedirectInstruction(Instruction* instruction) {
+ instruction->SetInstructionBits(rtCallRedirInstr | kCallRtRedirected);
+}
+
void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
void* start_addr, size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
@@ -733,21 +737,12 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
}
-void Simulator::Initialize(Isolate* isolate) {
- if (isolate->simulator_initialized()) return;
- isolate->set_simulator_initialized(true);
- ::v8::internal::ExternalReference::set_redirector(isolate,
- &RedirectExternalReference);
-}
-
-
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == nullptr) {
i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
- Initialize(isolate);
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
#if V8_TARGET_ARCH_PPC64
@@ -792,116 +787,6 @@ Simulator::~Simulator() {
free(stack_);
}
-// When the generated code calls an external reference we need to catch that in
-// the simulator. The external reference will be a function compiled for the
-// host architecture. We need to call that function instead of trying to
-// execute it with the simulator. We do that by redirecting the external
-// reference to a svc (Supervisor Call) instruction that is handled by
-// the simulator. We write the original destination of the jump just at a known
-// offset from the svc instruction so the simulator knows what to call.
-class Redirection {
- public:
- Redirection(Isolate* isolate, void* external_function,
- ExternalReference::Type type)
- : external_function_(external_function),
- swi_instruction_(rtCallRedirInstr | kCallRtRedirected),
- type_(type),
- next_(nullptr) {
- next_ = isolate->simulator_redirection();
- Simulator::current(isolate)->FlushICache(
- isolate->simulator_i_cache(),
- reinterpret_cast<void*>(&swi_instruction_), Instruction::kInstrSize);
- isolate->set_simulator_redirection(this);
- if (ABI_USES_FUNCTION_DESCRIPTORS) {
- function_descriptor_[0] = reinterpret_cast<intptr_t>(&swi_instruction_);
- function_descriptor_[1] = 0;
- function_descriptor_[2] = 0;
- }
- }
-
- void* address() {
- if (ABI_USES_FUNCTION_DESCRIPTORS) {
- return reinterpret_cast<void*>(function_descriptor_);
- } else {
- return reinterpret_cast<void*>(&swi_instruction_);
- }
- }
-
- void* external_function() { return external_function_; }
- ExternalReference::Type type() { return type_; }
-
- static Redirection* Get(Isolate* isolate, void* external_function,
- ExternalReference::Type type) {
- Redirection* current = isolate->simulator_redirection();
- for (; current != nullptr; current = current->next_) {
- if (current->external_function_ == external_function &&
- current->type_ == type) {
- return current;
- }
- }
- return new Redirection(isolate, external_function, type);
- }
-
- static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
- char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
- char* addr_of_redirection =
- addr_of_swi - offsetof(Redirection, swi_instruction_);
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- static Redirection* FromAddress(void* address) {
- int delta = ABI_USES_FUNCTION_DESCRIPTORS
- ? offsetof(Redirection, function_descriptor_)
- : offsetof(Redirection, swi_instruction_);
- char* addr_of_redirection = reinterpret_cast<char*>(address) - delta;
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- static void* ReverseRedirection(intptr_t reg) {
- Redirection* redirection = FromAddress(reinterpret_cast<void*>(reg));
- return redirection->external_function();
- }
-
- static void DeleteChain(Redirection* redirection) {
- while (redirection != nullptr) {
- Redirection* next = redirection->next_;
- delete redirection;
- redirection = next;
- }
- }
-
- private:
- void* external_function_;
- uint32_t swi_instruction_;
- ExternalReference::Type type_;
- Redirection* next_;
- intptr_t function_descriptor_[3];
-};
-
-
-// static
-void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
- Redirection* first) {
- Redirection::DeleteChain(first);
- if (i_cache != nullptr) {
- for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
- entry = i_cache->Next(entry)) {
- delete static_cast<CachePage*>(entry->value);
- }
- delete i_cache;
- }
-}
-
-
-void* Simulator::RedirectExternalReference(Isolate* isolate,
- void* external_function,
- ExternalReference::Type type) {
- base::LockGuard<base::Mutex> lock_guard(
- isolate->simulator_redirection_mutex());
- Redirection* redirection = Redirection::Get(isolate, external_function, type);
- return redirection->address();
-}
-
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
@@ -988,9 +873,9 @@ void Simulator::SetFpResult(const double& result) {
void Simulator::TrashCallerSaveRegisters() {
// We don't trash the registers with the return value.
#if 0 // A good idea to trash volatile registers, needs to be done
- registers_[2] = 0x50Bad4U;
- registers_[3] = 0x50Bad4U;
- registers_[12] = 0x50Bad4U;
+ registers_[2] = 0x50BAD4U;
+ registers_[3] = 0x50BAD4U;
+ registers_[12] = 0x50BAD4U;
#endif
}
@@ -1239,7 +1124,7 @@ void Simulator::Format(Instruction* instr, const char* format) {
bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
uint32_t uleft = static_cast<uint32_t>(left);
uint32_t uright = static_cast<uint32_t>(right);
- uint32_t urest = 0xffffffffU - uleft;
+ uint32_t urest = 0xFFFFFFFFU - uleft;
return (uright > urest) ||
(carry && (((uright + 1) > urest) || (uright > (urest - 1))));
@@ -1330,7 +1215,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
bool stack_aligned =
(get_register(sp) & (::v8::internal::FLAG_sim_stack_alignment - 1)) ==
0;
- Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ Redirection* redirection = Redirection::FromInstruction(instr);
const int kArgCount = 9;
const int kRegisterArgCount = 8;
int arg0_regnum = 3;
@@ -1641,7 +1526,7 @@ void Simulator::DisableStop(uint32_t code) {
void Simulator::IncreaseStopCounter(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
DCHECK(isWatchedStop(code));
- if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
PrintF(
"Stop counter for code %i has overflowed.\n"
"Enabling this code and reseting the counter to 0.\n",
@@ -1958,10 +1843,10 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
bit >>= 1;
}
} else if (mb == me + 1) {
- mask = 0xffffffff;
+ mask = 0xFFFFFFFF;
} else { // mb > me+1
int bit = 0x80000000 >> (me + 1); // needs to be tested
- mask = 0xffffffff;
+ mask = 0xFFFFFFFF;
for (; me < mb; me++) {
mask ^= bit;
bit >>= 1;
@@ -1987,7 +1872,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
} else {
int rb = instr->RBValue();
uint32_t rb_val = get_register(rb);
- sh = (rb_val & 0x1f);
+ sh = (rb_val & 0x1F);
}
int mb = instr->Bits(10, 6);
int me = instr->Bits(5, 1);
@@ -2000,10 +1885,10 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
bit >>= 1;
}
} else if (mb == me + 1) {
- mask = 0xffffffff;
+ mask = 0xFFFFFFFF;
} else { // mb > me+1
int bit = 0x80000000 >> (me + 1); // needs to be tested
- mask = 0xffffffff;
+ mask = 0xFFFFFFFF;
for (; me < mb; me++) {
mask ^= bit;
bit >>= 1;
@@ -2078,7 +1963,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int ra = instr->RAValue();
int rb = instr->RBValue();
uint32_t rs_val = get_register(rs);
- uintptr_t rb_val = get_register(rb) & 0x3f;
+ uintptr_t rb_val = get_register(rb) & 0x3F;
intptr_t result = (rb_val > 31) ? 0 : rs_val >> rb_val;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -2092,7 +1977,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int ra = instr->RAValue();
int rb = instr->RBValue();
uintptr_t rs_val = get_register(rs);
- uintptr_t rb_val = get_register(rb) & 0x7f;
+ uintptr_t rb_val = get_register(rb) & 0x7F;
intptr_t result = (rb_val > 63) ? 0 : rs_val >> rb_val;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -2160,7 +2045,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int ra = instr->RAValue();
int rb = instr->RBValue();
int32_t rs_val = get_register(rs);
- intptr_t rb_val = get_register(rb) & 0x3f;
+ intptr_t rb_val = get_register(rb) & 0x3F;
intptr_t result = (rb_val > 31) ? rs_val >> 31 : rs_val >> rb_val;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -2174,7 +2059,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int ra = instr->RAValue();
int rb = instr->RBValue();
intptr_t rs_val = get_register(rs);
- intptr_t rb_val = get_register(rb) & 0x7f;
+ intptr_t rb_val = get_register(rb) & 0x7F;
intptr_t result = (rb_val > 63) ? rs_val >> 63 : rs_val >> rb_val;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -2244,10 +2129,10 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
float* fptr = reinterpret_cast<float*>(&val);
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
// Conversion using double changes sNaN to qNaN on ia32/x64
- if ((val & 0x7f800000) == 0x7f800000) {
+ if ((val & 0x7F800000) == 0x7F800000) {
int64_t dval = static_cast<int64_t>(val);
- dval = ((dval & 0xc0000000) << 32) | ((dval & 0x40000000) << 31) |
- ((dval & 0x40000000) << 30) | ((dval & 0x7fffffff) << 29) | 0x0;
+ dval = ((dval & 0xC0000000) << 32) | ((dval & 0x40000000) << 31) |
+ ((dval & 0x40000000) << 30) | ((dval & 0x7FFFFFFF) << 29) | 0x0;
set_d_register(frt, dval);
} else {
set_d_register_from_double(frt, static_cast<double>(*fptr));
@@ -2289,9 +2174,9 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
// Conversion using double changes sNaN to qNaN on ia32/x64
int32_t sval = 0;
int64_t dval = get_d_register(frs);
- if ((dval & 0x7ff0000000000000) == 0x7ff0000000000000) {
- sval = ((dval & 0xc000000000000000) >> 32) |
- ((dval & 0x07ffffffe0000000) >> 29);
+ if ((dval & 0x7FF0000000000000) == 0x7FF0000000000000) {
+ sval = ((dval & 0xC000000000000000) >> 32) |
+ ((dval & 0x07FFFFFFE0000000) >> 29);
p = &sval;
} else {
p = reinterpret_cast<int32_t*>(&frs_val);
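
The bit shuffling in these LFS/STFS paths exists because, as the comment notes, widening a float signalling NaN to double through an ordinary conversion on ia32/x64 produces a quiet NaN, so the payload has to be moved by hand. A small host check of that behaviour (host-dependent, illustrative only):

    // Shows the quieting the comment above refers to: float -> double
    // conversion of a signalling NaN sets the mantissa MSB on x86 hosts.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <limits>

    int main() {
      float snan = std::numeric_limits<float>::signaling_NaN();
      double widened = static_cast<double>(snan);  // quiets the NaN on x86

      uint64_t bits;
      std::memcpy(&bits, &widened, sizeof(bits));
      int quiet_bit = static_cast<int>((bits >> 51) & 1);  // mantissa MSB

      std::printf("quiet bit after conversion: %d\n", quiet_bit);
      return 0;
    }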
@@ -2625,7 +2510,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int ra = instr->RAValue();
int rb = instr->RBValue();
uint32_t rs_val = get_register(rs);
- uintptr_t rb_val = get_register(rb) & 0x3f;
+ uintptr_t rb_val = get_register(rb) & 0x3F;
uint32_t result = (rb_val > 31) ? 0 : rs_val << rb_val;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -2639,7 +2524,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int ra = instr->RAValue();
int rb = instr->RBValue();
uintptr_t rs_val = get_register(rs);
- uintptr_t rb_val = get_register(rb) & 0x7f;
+ uintptr_t rb_val = get_register(rb) & 0x7F;
uintptr_t result = (rb_val > 63) ? 0 : rs_val << rb_val;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -3249,7 +3134,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rt = instr->RTValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- uintptr_t result = ReadHU(ra_val + offset, instr) & 0xffff;
+ uintptr_t result = ReadHU(ra_val + offset, instr) & 0xFFFF;
set_register(rt, result);
if (opcode == LHZU) {
set_register(ra, ra_val + offset);
@@ -3302,10 +3187,10 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
float* fptr = reinterpret_cast<float*>(&val);
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
// Conversion using double changes sNaN to qNaN on ia32/x64
- if ((val & 0x7f800000) == 0x7f800000) {
+ if ((val & 0x7F800000) == 0x7F800000) {
int64_t dval = static_cast<int64_t>(val);
- dval = ((dval & 0xc0000000) << 32) | ((dval & 0x40000000) << 31) |
- ((dval & 0x40000000) << 30) | ((dval & 0x7fffffff) << 29) | 0x0;
+ dval = ((dval & 0xC0000000) << 32) | ((dval & 0x40000000) << 31) |
+ ((dval & 0x40000000) << 30) | ((dval & 0x7FFFFFFF) << 29) | 0x0;
set_d_register(frt, dval);
} else {
set_d_register_from_double(frt, static_cast<double>(*fptr));
@@ -3347,9 +3232,9 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
// Conversion using double changes sNaN to qNaN on ia32/x64
int32_t sval = 0;
int64_t dval = get_d_register(frs);
- if ((dval & 0x7ff0000000000000) == 0x7ff0000000000000) {
- sval = ((dval & 0xc000000000000000) >> 32) |
- ((dval & 0x07ffffffe0000000) >> 29);
+ if ((dval & 0x7FF0000000000000) == 0x7FF0000000000000) {
+ sval = ((dval & 0xC000000000000000) >> 32) |
+ ((dval & 0x07FFFFFFE0000000) >> 29);
p = &sval;
} else {
p = reinterpret_cast<int32_t*>(&frs_val);
@@ -3749,7 +3634,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
case MTFSF: {
int frb = instr->RBValue();
int64_t frb_dval = get_d_register(frb);
- int32_t frb_ival = static_cast<int32_t>((frb_dval)&0xffffffff);
+ int32_t frb_ival = static_cast<int32_t>((frb_dval)&0xFFFFFFFF);
int l = instr->Bits(25, 25);
if (l == 1) {
fp_condition_reg_ = frb_ival;
@@ -3774,8 +3659,8 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int bfa = instr->Bits(20, 18);
int cr_shift = (7 - bf) * CRWIDTH;
int fp_shift = (7 - bfa) * CRWIDTH;
- int field_val = (fp_condition_reg_ >> fp_shift) & 0xf;
- condition_reg_ &= ~(0x0f << cr_shift);
+ int field_val = (fp_condition_reg_ >> fp_shift) & 0xF;
+ condition_reg_ &= ~(0x0F << cr_shift);
condition_reg_ |= (field_val << cr_shift);
// Clear copied exception bits
switch (bfa) {
@@ -3826,7 +3711,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
DCHECK(sh >= 0 && sh <= 63);
DCHECK(mb >= 0 && mb <= 63);
uintptr_t result = base::bits::RotateLeft64(rs_val, sh);
- uintptr_t mask = 0xffffffffffffffff >> mb;
+ uintptr_t mask = 0xFFFFFFFFFFFFFFFF >> mb;
result &= mask;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -3843,7 +3728,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
DCHECK(sh >= 0 && sh <= 63);
DCHECK(me >= 0 && me <= 63);
uintptr_t result = base::bits::RotateLeft64(rs_val, sh);
- uintptr_t mask = 0xffffffffffffffff << (63 - me);
+ uintptr_t mask = 0xFFFFFFFFFFFFFFFF << (63 - me);
result &= mask;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -3860,7 +3745,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
DCHECK(sh >= 0 && sh <= 63);
DCHECK(mb >= 0 && mb <= 63);
uintptr_t result = base::bits::RotateLeft64(rs_val, sh);
- uintptr_t mask = (0xffffffffffffffff >> mb) & (0xffffffffffffffff << sh);
+ uintptr_t mask = (0xFFFFFFFFFFFFFFFF >> mb) & (0xFFFFFFFFFFFFFFFF << sh);
result &= mask;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -3885,10 +3770,10 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
bit >>= 1;
}
} else if (mb == me + 1) {
- mask = 0xffffffffffffffff;
+ mask = 0xFFFFFFFFFFFFFFFF;
} else { // mb > me+1
uintptr_t bit = 0x8000000000000000 >> (me + 1); // needs to be tested
- mask = 0xffffffffffffffff;
+ mask = 0xFFFFFFFFFFFFFFFF;
for (; me < mb; me++) {
mask ^= bit;
bit >>= 1;
@@ -3909,12 +3794,12 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
uintptr_t rs_val = get_register(rs);
uintptr_t rb_val = get_register(rb);
- int sh = (rb_val & 0x3f);
+ int sh = (rb_val & 0x3F);
int mb = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
DCHECK(sh >= 0 && sh <= 63);
DCHECK(mb >= 0 && mb <= 63);
uintptr_t result = base::bits::RotateLeft64(rs_val, sh);
- uintptr_t mask = 0xffffffffffffffff >> mb;
+ uintptr_t mask = 0xFFFFFFFFFFFFFFFF >> mb;
result &= mask;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
@@ -4202,17 +4087,15 @@ void Simulator::CallInternal(byte* entry) {
set_register(fp, r31_val);
}
-
-intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
+intptr_t Simulator::CallImpl(byte* entry, int argument_count,
+ const intptr_t* arguments) {
// Set up arguments
// First eight arguments passed in registers r3-r10.
- int reg_arg_count = (argument_count > 8) ? 8 : argument_count;
+ int reg_arg_count = std::min(8, argument_count);
int stack_arg_count = argument_count - reg_arg_count;
for (int i = 0; i < reg_arg_count; i++) {
- set_register(i + 3, va_arg(parameters, intptr_t));
+ set_register(i + 3, arguments[i]);
}
// Remaining arguments passed on stack.
@@ -4228,10 +4111,8 @@ intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
// +2 is a hack for the LR slot + old SP on PPC
intptr_t* stack_argument =
reinterpret_cast<intptr_t*>(entry_stack) + kStackFrameExtraParamSlot;
- for (int i = 0; i < stack_arg_count; i++) {
- stack_argument[i] = va_arg(parameters, intptr_t);
- }
- va_end(parameters);
+ memcpy(stack_argument, arguments + reg_arg_count,
+ stack_arg_count * sizeof(*arguments));
set_register(sp, entry_stack);
CallInternal(entry);
@@ -4240,8 +4121,7 @@ intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
CHECK_EQ(entry_stack, get_register(sp));
set_register(sp, original_stack);
- intptr_t result = get_register(r3);
- return result;
+ return get_register(r3);
}
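
The new CallImpl above keeps the PPC calling convention of the old varargs Call: at most eight arguments land in r3-r10 and the rest are copied to the simulated stack in one memcpy. A standalone sketch of that split (the register numbering constant is the only assumption):

    // Standalone model of the argument split in Simulator::CallImpl above.
    #include <algorithm>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    constexpr int kFirstArgReg = 3;      // PPC passes arguments in r3..r10
    constexpr int kMaxRegisterArgs = 8;

    void DistributeArguments(int argument_count, const intptr_t* arguments,
                             intptr_t registers[32],
                             std::vector<intptr_t>* stack_slots) {
      int reg_arg_count = std::min(kMaxRegisterArgs, argument_count);
      int stack_arg_count = argument_count - reg_arg_count;

      for (int i = 0; i < reg_arg_count; i++) {
        registers[kFirstArgReg + i] = arguments[i];
      }

      // Remaining arguments go onto the (simulated) stack in a single copy.
      stack_slots->resize(stack_arg_count);
      if (stack_arg_count > 0) {
        std::memcpy(stack_slots->data(), arguments + reg_arg_count,
                    stack_arg_count * sizeof(*arguments));
      }
    }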
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index aba6c3671b..544b9d463e 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
// Declares a Simulator for PPC instructions if we are not generating a native
// PPC binary. This Simulator allows us to run and debug PPC code generation on
// regular desktop machines.
-// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// V8 calls into generated code via the GeneratedCode wrapper,
// which will start execution in the Simulator or forward to the real entry
// on a PPC HW platform.
@@ -15,55 +14,13 @@
#include "src/allocation.h"
-#if !defined(USE_SIMULATOR)
-// Running without a simulator on a native ppc platform.
-
-namespace v8 {
-namespace internal {
-
-// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-typedef int (*ppc_regexp_matcher)(String*, int, const byte*, const byte*, int*,
- int, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type ppc_regexp_matcher.
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- (FUNCTION_CAST<ppc_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on ppc uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- USE(isolate);
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
- USE(isolate);
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
- USE(isolate);
- }
-};
-} // namespace internal
-} // namespace v8
-
-#else // !defined(USE_SIMULATOR)
+#if defined(USE_SIMULATOR)
// Running with a simulator.
#include "src/assembler.h"
#include "src/base/hashmap.h"
#include "src/ppc/constants-ppc.h"
+#include "src/simulator-base.h"
namespace v8 {
namespace internal {
@@ -94,8 +51,7 @@ class CachePage {
char validity_map_[kValidityMapSize]; // One byte per line.
};
-
-class Simulator {
+class Simulator : public SimulatorBase {
public:
friend class PPCDebugger;
enum Register {
@@ -210,15 +166,11 @@ class Simulator {
// Executes PPC instructions until the PC reaches end_sim_pc.
void Execute();
- // Call on program start.
- static void Initialize(Isolate* isolate);
-
- static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
+ template <typename Return, typename... Args>
+ Return Call(byte* entry, Args... args) {
+ return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
+ }
- // V8 generally calls into generated JS code with 5 parameters and into
- // generated RegExp code with 7 parameters. This is a convenience function,
- // which sets up the simulator state and grabs the result on return.
- intptr_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
void CallFP(byte* entry, double d0, double d1);
int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
@@ -234,6 +186,9 @@ class Simulator {
void set_last_debugger_input(char* input);
char* last_debugger_input() { return last_debugger_input_; }
+ // Redirection support.
+ static void SetRedirectInstruction(Instruction* instruction);
+
// ICache checking.
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -254,6 +209,8 @@ class Simulator {
end_sim_pc = -2
};
+ intptr_t CallImpl(byte* entry, int argument_count, const intptr_t* arguments);
+
enum BCType { BC_OFFSET, BC_LINK_REG, BC_CTR_REG };
// Unsupported instructions use Format to print an error and stop execution.
@@ -341,11 +298,6 @@ class Simulator {
static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
void* page);
- // Runtime call support. Uses the isolate in a thread-safe way.
- static void* RedirectExternalReference(
- Isolate* isolate, void* external_function,
- v8::internal::ExternalReference::Type type);
-
// Handle arguments and return value for runtime FP functions.
void GetFpArgs(double* x, double* y, intptr_t* z);
void SetFpResult(const double& result);
@@ -481,43 +433,8 @@ class Simulator {
static base::LazyInstance<GlobalMonitor>::type global_monitor_;
};
-
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
- FUNCTION_ADDR(entry), 5, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, \
- (intptr_t)p3, (intptr_t)p4))
-
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- Simulator::current(isolate)->Call( \
- entry, 9, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, (intptr_t)p3, \
- (intptr_t)p4, (intptr_t)p5, (intptr_t)p6, (intptr_t)p7, (intptr_t)p8)
-
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. The JS-based limit normally points near the end of
-// the simulator stack. When the C-based limit is exhausted we reflect that by
-// lowering the JS-based limit as well, to make stack checks trigger.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit(c_limit);
- }
-
- static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(isolate);
- return sim->PushAddress(try_catch_address);
- }
-
- static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
- Simulator::current(isolate)->PopAddress();
- }
-};
} // namespace internal
} // namespace v8
-#endif // !defined(USE_SIMULATOR)
+#endif // defined(USE_SIMULATOR)
#endif // V8_PPC_SIMULATOR_PPC_H_
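
The typed Call<Return>(entry, args...) template above replaces the old CALL_GENERATED_CODE / CALL_GENERATED_REGEXP_CODE macros; the packing into an intptr_t array is presumably done by SimulatorBase::VariadicCall from the newly included src/simulator-base.h. One plausible shape for such a wrapper, as a self-contained toy (integral return type and word-sized arguments assumed):

    // Toy version of a variadic Call wrapper funnelling typed arguments into
    // an intptr_t-based CallImpl; not the real SimulatorBase implementation.
    #include <cstdint>
    #include <cstring>

    using byte = uint8_t;

    template <typename T>
    intptr_t ToWord(T value) {
      static_assert(sizeof(T) <= sizeof(intptr_t), "argument must fit in a word");
      intptr_t word = 0;
      std::memcpy(&word, &value, sizeof(value));
      return word;
    }

    struct ToySimulator {
      // Toy stand-in: echoes the first argument instead of running code.
      intptr_t CallImpl(byte* entry, int argument_count,
                        const intptr_t* arguments) {
        (void)entry;
        return argument_count > 0 ? arguments[0] : 0;
      }

      template <typename Return, typename... Args>
      Return Call(byte* entry, Args... args) {
        // Pack every argument into a machine word, preserving order.
        intptr_t packed[] = {ToWord(args)..., 0};  // trailing 0 avoids an empty array
        return static_cast<Return>(
            CallImpl(entry, static_cast<int>(sizeof...(args)), packed));
      }
    };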
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index 91617d7231..ac8f55a89b 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -322,8 +322,8 @@ void CpuProfiler::CreateEntriesForRuntimeCallStats() {
static_entries_.clear();
RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
CodeMap* code_map = generator_->code_map();
- for (int i = 0; i < RuntimeCallStats::counters_count; ++i) {
- RuntimeCallCounter* counter = &(rcs->*(RuntimeCallStats::counters[i]));
+ for (int i = 0; i < RuntimeCallStats::kNumberOfCounters; ++i) {
+ RuntimeCallCounter* counter = rcs->GetCounter(i);
DCHECK(counter->name());
std::unique_ptr<CodeEntry> entry(
new CodeEntry(CodeEventListener::FUNCTION_TAG, counter->name(),
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 6f3a952d1f..40779d9e5f 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -1905,6 +1905,9 @@ const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
#define STRUCT_MAP_NAME(NAME, Name, name) NAME_ENTRY(name##_map)
STRUCT_LIST(STRUCT_MAP_NAME)
#undef STRUCT_MAP_NAME
+#define DATA_HANDLER_MAP_NAME(NAME, Name, Size, name) NAME_ENTRY(name##_map)
+ DATA_HANDLER_LIST(DATA_HANDLER_MAP_NAME)
+#undef DATA_HANDLER_MAP_NAME
#define STRING_NAME(name, str) NAME_ENTRY(name)
INTERNALIZED_STRING_LIST(STRING_NAME)
#undef STRING_NAME
@@ -2732,10 +2735,10 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) {
static const char hex_chars[] = "0123456789ABCDEF";
w->AddString("\\u");
- w->AddCharacter(hex_chars[(u >> 12) & 0xf]);
- w->AddCharacter(hex_chars[(u >> 8) & 0xf]);
- w->AddCharacter(hex_chars[(u >> 4) & 0xf]);
- w->AddCharacter(hex_chars[u & 0xf]);
+ w->AddCharacter(hex_chars[(u >> 12) & 0xF]);
+ w->AddCharacter(hex_chars[(u >> 8) & 0xF]);
+ w->AddCharacter(hex_chars[(u >> 4) & 0xF]);
+ w->AddCharacter(hex_chars[u & 0xF]);
}
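
WriteUChar above builds a \uXXXX escape by masking out the four nibbles of the code unit; the change only uppercases the 0xF masks. A standalone equivalent for a 16-bit code unit:

    // Standalone equivalent of WriteUChar above for a 16-bit code unit.
    #include <cstdint>
    #include <string>

    std::string EscapeUChar(uint16_t u) {
      static const char kHexChars[] = "0123456789ABCDEF";
      std::string out = "\\u";
      out += kHexChars[(u >> 12) & 0xF];
      out += kHexChars[(u >> 8) & 0xF];
      out += kHexChars[(u >> 4) & 0xF];
      out += kHexChars[u & 0xF];
      return out;
    }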
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 070432225a..2dacd5a9fe 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -12,6 +12,7 @@
#include "include/v8-profiler.h"
#include "src/base/platform/time.h"
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
#include "src/profiler/strings-storage.h"
#include "src/string-hasher.h"
#include "src/visitors.h"
@@ -26,6 +27,8 @@ class HeapIterator;
class HeapProfiler;
class HeapSnapshot;
class JSArrayBuffer;
+class JSCollection;
+class JSWeakCollection;
class SnapshotFiller;
class HeapGraphEdge BASE_EMBEDDED {
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 9570c77dd2..bb6ede6d95 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -332,14 +332,6 @@ ProfileNode* ProfileTree::AddPathFromEnd(const std::vector<CodeEntry*>& path,
}
-struct NodesPair {
- NodesPair(ProfileNode* src, ProfileNode* dst)
- : src(src), dst(dst) { }
- ProfileNode* src;
- ProfileNode* dst;
-};
-
-
class Position {
public:
explicit Position(ProfileNode* node)
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index fecfdb66b0..bd2f158e60 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -259,7 +259,7 @@ void ProfilerListener::RecordDeoptInlinedFrames(CodeEntry* entry,
DCHECK(last_position.IsKnown());
std::vector<CpuProfileDeoptFrame> inlined_frames;
for (SourcePositionInfo& pos_info : last_position.InliningStack(code)) {
- DCHECK_NE(pos_info.position.ScriptOffset(), kNoSourcePosition);
+ if (pos_info.position.ScriptOffset() == kNoSourcePosition) continue;
if (!pos_info.function->script()->IsScript()) continue;
int script_id = Script::cast(pos_info.function->script())->id();
size_t offset = static_cast<size_t>(pos_info.position.ScriptOffset());
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index 51fe8866fa..fef21550ec 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -66,24 +66,15 @@ SamplingHeapProfiler::SamplingHeapProfiler(
rate_(rate),
flags_(flags) {
CHECK_GT(rate_, 0u);
- heap->new_space()->AddAllocationObserver(new_space_observer_.get());
- AllSpaces spaces(heap);
- for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
- if (space != heap->new_space()) {
- space->AddAllocationObserver(other_spaces_observer_.get());
- }
- }
+
+ heap_->AddAllocationObserversToAllSpaces(other_spaces_observer_.get(),
+ new_space_observer_.get());
}
SamplingHeapProfiler::~SamplingHeapProfiler() {
- heap_->new_space()->RemoveAllocationObserver(new_space_observer_.get());
- AllSpaces spaces(heap_);
- for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
- if (space != heap_->new_space()) {
- space->RemoveAllocationObserver(other_spaces_observer_.get());
- }
- }
+ heap_->RemoveAllocationObserversFromAllSpaces(other_spaces_observer_.get(),
+ new_space_observer_.get());
for (auto sample : samples_) {
delete sample;
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index f4ca28c19f..44bf9af3d1 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -34,23 +34,23 @@ bool IsNoFrameRegion(i::Address address) {
#if V8_HOST_ARCH_IA32
// push %ebp
// mov %esp,%ebp
- {3, {0x55, 0x89, 0xe5}, {0, 1, -1}},
+ {3, {0x55, 0x89, 0xE5}, {0, 1, -1}},
// pop %ebp
// ret N
- {2, {0x5d, 0xc2}, {0, 1, -1}},
+ {2, {0x5D, 0xC2}, {0, 1, -1}},
// pop %ebp
// ret
- {2, {0x5d, 0xc3}, {0, 1, -1}},
+ {2, {0x5D, 0xC3}, {0, 1, -1}},
#elif V8_HOST_ARCH_X64
// pushq %rbp
// movq %rsp,%rbp
- {4, {0x55, 0x48, 0x89, 0xe5}, {0, 1, -1}},
+ {4, {0x55, 0x48, 0x89, 0xE5}, {0, 1, -1}},
// popq %rbp
// ret N
- {2, {0x5d, 0xc2}, {0, 1, -1}},
+ {2, {0x5D, 0xC2}, {0, 1, -1}},
// popq %rbp
// ret
- {2, {0x5d, 0xc3}, {0, 1, -1}},
+ {2, {0x5D, 0xC3}, {0, 1, -1}},
#endif
{0, {}, {}}
};
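
The table above lists prologue/epilogue byte sequences (push %ebp; mov %esp,%ebp and the two ret forms) whose PC offsets have no frame set up yet; the change only uppercases the encoded bytes. A sketch of how such a table can be matched against the bytes at a sampled PC; the struct layout mirrors the initializers but is an assumption, not the actual tick-sample type:

    // Sketch of matching a sampled PC against patterns like those above.
    #include <cstdint>
    #include <cstring>

    struct CodePattern {
      int bytes_count;
      uint8_t bytes[8];
      int offsets[4];  // candidate PC offsets into the pattern, -1 terminated
    };

    bool IsNoFramePc(const CodePattern& pattern, const uint8_t* pc) {
      for (int i = 0; pattern.offsets[i] != -1; ++i) {
        const uint8_t* pattern_start = pc - pattern.offsets[i];
        // A match means the PC sits inside this prologue/epilogue sequence.
        if (std::memcmp(pattern_start, pattern.bytes, pattern.bytes_count) == 0) {
          return true;
        }
      }
      return false;
    }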
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 2e6425568b..5f9d3905a3 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -86,8 +86,7 @@ namespace internal {
* bool direct_call = false,
* Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in arm/simulator-arm.h.
+ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
*/
#define __ ACCESS_MASM(masm_)
@@ -506,12 +505,12 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
Label success;
__ cmp(current_character(), Operand(' '));
__ b(eq, &success);
- // Check range 0x09..0x0d
+ // Check range 0x09..0x0D
__ sub(r0, current_character(), Operand('\t'));
__ cmp(r0, Operand('\r' - '\t'));
__ b(ls, &success);
// \u00a0 (NBSP).
- __ cmp(r0, Operand(0x00a0 - '\t'));
+ __ cmp(r0, Operand(0x00A0 - '\t'));
BranchOrBacktrack(ne, on_no_match);
__ bind(&success);
return true;
@@ -533,37 +532,37 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
BranchOrBacktrack(ls, on_no_match);
return true;
case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ eor(r0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(r0, r0, Operand(0x0b));
- __ cmp(r0, Operand(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ sub(r0, r0, Operand(0x0B));
+ __ cmp(r0, Operand(0x0C - 0x0B));
BranchOrBacktrack(ls, on_no_match);
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(r0, r0, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ sub(r0, r0, Operand(0x2028 - 0x0B));
__ cmp(r0, Operand(1));
BranchOrBacktrack(ls, on_no_match);
}
return true;
}
case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ eor(r0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(r0, r0, Operand(0x0b));
- __ cmp(r0, Operand(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ sub(r0, r0, Operand(0x0B));
+ __ cmp(r0, Operand(0x0C - 0x0B));
if (mode_ == LATIN1) {
BranchOrBacktrack(hi, on_no_match);
} else {
Label done;
__ b(ls, &done);
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(r0, r0, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ sub(r0, r0, Operand(0x2028 - 0x0B));
__ cmp(r0, Operand(1));
BranchOrBacktrack(hi, on_no_match);
__ bind(&done);
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 558ee673f1..5f77ff4021 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -96,8 +96,7 @@ namespace internal {
* bool direct_call = false,
* Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in arm64/simulator-arm64.h.
+ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
*/
#define __ ACCESS_MASM(masm_)
@@ -116,7 +115,6 @@ RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(Isolate* isolate,
success_label_(),
backtrack_label_(),
exit_label_() {
- __ SetStackPointer(csp);
DCHECK_EQ(0, registers_to_save % 2);
// We can cache at most 16 W registers in x0-x7.
STATIC_ASSERT(kNumCachedRegisters <= 16);
@@ -366,7 +364,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
__ Ccmp(current_input_offset(), 0, NoFlag, eq);
// The current input offset should be <= 0, and fit in a W register.
- __ Check(le, kOffsetOutOfRange);
+ __ Check(le, AbortReason::kOffsetOutOfRange);
}
} else {
DCHECK(mode_ == UC16);
@@ -503,7 +501,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReference(int start_reg,
__ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
__ Ccmp(current_input_offset(), 0, NoFlag, eq);
// The current input offset should be <= 0, and fit in a W register.
- __ Check(le, kOffsetOutOfRange);
+ __ Check(le, AbortReason::kOffsetOutOfRange);
}
__ Bind(&fallthrough);
}
@@ -588,11 +586,11 @@ bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
if (mode_ == LATIN1) {
// One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
- // Check for ' ' or 0x00a0.
+ // Check for ' ' or 0x00A0.
__ Cmp(current_character(), ' ');
- __ Ccmp(current_character(), 0x00a0, ZFlag, ne);
+ __ Ccmp(current_character(), 0x00A0, ZFlag, ne);
__ B(eq, &success);
- // Check range 0x09..0x0d.
+ // Check range 0x09..0x0D.
__ Sub(w10, current_character(), '\t');
CompareAndBranchOrBacktrack(w10, '\r' - '\t', hi, on_no_match);
__ Bind(&success);
@@ -613,12 +611,12 @@ bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
CompareAndBranchOrBacktrack(w10, '9' - '0', ls, on_no_match);
return true;
case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
// Here we emit the conditional branch only once at the end to make branch
// prediction more efficient, even though we could branch out of here
// as soon as a character matches.
- __ Cmp(current_character(), 0x0a);
- __ Ccmp(current_character(), 0x0d, ZFlag, ne);
+ __ Cmp(current_character(), 0x0A);
+ __ Ccmp(current_character(), 0x0D, ZFlag, ne);
if (mode_ == UC16) {
__ Sub(w10, current_character(), 0x2028);
// If the Z flag was set we clear the flags to force a branch.
@@ -631,11 +629,11 @@ bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
return true;
}
case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
// We have to check all 4 newline characters before emitting
// the conditional branch.
- __ Cmp(current_character(), 0x0a);
- __ Ccmp(current_character(), 0x0d, ZFlag, ne);
+ __ Cmp(current_character(), 0x0A);
+ __ Ccmp(current_character(), 0x0D, ZFlag, ne);
if (mode_ == UC16) {
__ Sub(w10, current_character(), 0x2028);
// If the Z flag was set we clear the flags to force a fall-through.
@@ -791,7 +789,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Check that the size of the input string chars is in range.
__ Neg(x11, x10);
__ Cmp(x11, SeqTwoByteString::kMaxCharsSize);
- __ Check(ls, kInputStringTooLong);
+ __ Check(ls, AbortReason::kInputStringTooLong);
}
__ Mov(current_input_offset(), w10);
@@ -855,7 +853,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
if (masm_->emit_debug_code()) {
// Check that the size of the input string chars is in range.
__ Cmp(x10, SeqTwoByteString::kMaxCharsSize);
- __ Check(ls, kInputStringTooLong);
+ __ Check(ls, AbortReason::kInputStringTooLong);
}
// input_start has a start_offset offset on entry. We need to include
// it when computing the length of the whole string.
@@ -1158,7 +1156,7 @@ void RegExpMacroAssemblerARM64::PushBacktrack(Label* label) {
if (masm_->emit_debug_code()) {
__ Cmp(x10, kWRegMask);
// The code offset has to fit in a W register.
- __ Check(ls, kOffsetOutOfRange);
+ __ Check(ls, AbortReason::kOffsetOutOfRange);
}
}
Push(w10);
@@ -1314,7 +1312,7 @@ void RegExpMacroAssemblerARM64::WriteStackPointerToRegister(int reg) {
if (masm_->emit_debug_code()) {
__ Cmp(x10, Operand(w10, SXTW));
// The stack offset needs to fit in a W register.
- __ Check(eq, kOffsetOutOfRange);
+ __ Check(eq, AbortReason::kOffsetOutOfRange);
}
StoreRegister(reg, w10);
}
@@ -1623,7 +1621,7 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
__ Add(x10, x10, Operand(current_input_offset(), SXTW));
__ Cmp(x10, Operand(w10, SXTW));
// The offset needs to fit in a W register.
- __ Check(eq, kOffsetOutOfRange);
+ __ Check(eq, AbortReason::kOffsetOutOfRange);
} else {
__ Add(w10, current_input_offset(), cp_offset * char_size());
}
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 99d1466f54..cb240d6c67 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -531,12 +531,12 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
Label success;
__ cmp(current_character(), ' ');
__ j(equal, &success, Label::kNear);
- // Check range 0x09..0x0d
+ // Check range 0x09..0x0D
__ lea(eax, Operand(current_character(), -'\t'));
__ cmp(eax, '\r' - '\t');
__ j(below_equal, &success, Label::kNear);
// \u00a0 (NBSP).
- __ cmp(eax, 0x00a0 - '\t');
+ __ cmp(eax, 0x00A0 - '\t');
BranchOrBacktrack(not_equal, on_no_match);
__ bind(&success);
return true;
@@ -558,18 +558,18 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
BranchOrBacktrack(below_equal, on_no_match);
return true;
case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ mov(eax, current_character());
__ xor_(eax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(eax, Immediate(0x0b));
- __ cmp(eax, 0x0c - 0x0b);
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ sub(eax, Immediate(0x0B));
+ __ cmp(eax, 0x0C - 0x0B);
BranchOrBacktrack(below_equal, on_no_match);
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(eax, Immediate(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ sub(eax, Immediate(0x2028 - 0x0B));
__ cmp(eax, 0x2029 - 0x2028);
BranchOrBacktrack(below_equal, on_no_match);
}
@@ -610,13 +610,13 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
// Match any character.
return true;
case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029).
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 or 0x2029).
// The opposite of '.'.
__ mov(eax, current_character());
__ xor_(eax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(eax, Immediate(0x0b));
- __ cmp(eax, 0x0c - 0x0b);
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ sub(eax, Immediate(0x0B));
+ __ cmp(eax, 0x0C - 0x0B);
if (mode_ == LATIN1) {
BranchOrBacktrack(above, on_no_match);
} else {
@@ -624,9 +624,9 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
BranchOrBacktrack(below_equal, &done);
DCHECK_EQ(UC16, mode_);
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(eax, Immediate(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ sub(eax, Immediate(0x2028 - 0x0B));
__ cmp(eax, 1);
BranchOrBacktrack(above, on_no_match);
__ bind(&done);
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index 9d56e4cfa3..a26a1d77ce 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -98,12 +98,36 @@ ContainedInLattice AddRange(ContainedInLattice containment,
return containment;
}
-// Generic RegExp methods. Dispatches to implementation specific methods.
-
+// More makes code generation slower, less makes V8 benchmark score lower.
+const int kMaxLookaheadForBoyerMoore = 8;
// In a 3-character pattern you can maximally step forwards 3 characters
// at a time, which is not always enough to pay for the extra logic.
const int kPatternTooShortForBoyerMoore = 2;
+// Identifies the sort of regexps where the regexp engine is faster
+// than the code used for atom matches.
+static bool HasFewDifferentCharacters(Handle<String> pattern) {
+ int length = Min(kMaxLookaheadForBoyerMoore, pattern->length());
+ if (length <= kPatternTooShortForBoyerMoore) return false;
+ const int kMod = 128;
+ bool character_found[kMod];
+ int different = 0;
+ memset(&character_found[0], 0, sizeof(character_found));
+ for (int i = 0; i < length; i++) {
+ int ch = (pattern->Get(i) & (kMod - 1));
+ if (!character_found[ch]) {
+ character_found[ch] = true;
+ different++;
+ // We declare a regexp low-alphabet if it has at least 3 times as many
+ // characters as it has different characters.
+ if (different * 3 > length) return false;
+ }
+ }
+ return true;
+}
+
+// Generic RegExp methods. Dispatches to implementation specific methods.
+
MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
Handle<String> pattern,
JSRegExp::Flags flags) {
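
The new HasFewDifferentCharacters above gates the atom fast path: only the first kMaxLookaheadForBoyerMoore characters are inspected, and the regexp engine is preferred when a pattern repeats few distinct characters (at least three times as many characters as distinct ones). The same counting rule, lifted onto plain strings so it can be tried on examples, where "aaaaaaaa" is low-alphabet and "abcdefgh" is not:

    // Host-side copy of the heuristic above, applied to std::string so it can
    // be exercised on sample patterns; constants match the patch.
    #include <algorithm>
    #include <cstdio>
    #include <string>

    const int kMaxLookaheadForBoyerMoore = 8;
    const int kPatternTooShortForBoyerMoore = 2;

    bool HasFewDifferentCharacters(const std::string& pattern) {
      int length = std::min(kMaxLookaheadForBoyerMoore,
                            static_cast<int>(pattern.length()));
      if (length <= kPatternTooShortForBoyerMoore) return false;
      const int kMod = 128;
      bool character_found[kMod] = {false};
      int different = 0;
      for (int i = 0; i < length; i++) {
        int ch = pattern[i] & (kMod - 1);
        if (!character_found[ch]) {
          character_found[ch] = true;
          different++;
          // Low-alphabet: at least 3x as many characters as distinct ones.
          if (different * 3 > length) return false;
        }
      }
      return true;
    }

    int main() {
      std::printf("%d %d\n", HasFewDifferentCharacters("aaaaaaaa") ? 1 : 0,
                  HasFewDifferentCharacters("abcdefgh") ? 1 : 0);
      return 0;
    }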
@@ -133,7 +157,7 @@ MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
bool has_been_compiled = false;
if (parse_result.simple && !IgnoreCase(flags) && !IsSticky(flags) &&
- pattern->length() <= kPatternTooShortForBoyerMoore) {
+ !HasFewDifferentCharacters(pattern)) {
// Parse-tree is a single atom that is equal to the pattern.
AtomCompile(re, pattern, flags, pattern);
has_been_compiled = true;
@@ -141,12 +165,11 @@ MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
parse_result.capture_count == 0) {
RegExpAtom* atom = parse_result.tree->AsAtom();
Vector<const uc16> atom_pattern = atom->data();
- if (!IgnoreCase(atom->flags()) &&
- atom_pattern.length() <= kPatternTooShortForBoyerMoore) {
- Handle<String> atom_string;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, atom_string,
- isolate->factory()->NewStringFromTwoByte(atom_pattern), Object);
+ Handle<String> atom_string;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, atom_string,
+ isolate->factory()->NewStringFromTwoByte(atom_pattern), Object);
+ if (!IgnoreCase(atom->flags()) && !HasFewDifferentCharacters(atom_string)) {
AtomCompile(re, pattern, flags, atom_string);
has_been_compiled = true;
}
@@ -2433,8 +2456,8 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
} else {
// For 2-character preloads in one-byte mode or 1-character preloads in
// two-byte mode we also use a 16 bit load with zero extend.
- static const uint32_t kTwoByteMask = 0xffff;
- static const uint32_t kFourByteMask = 0xffffffff;
+ static const uint32_t kTwoByteMask = 0xFFFF;
+ static const uint32_t kFourByteMask = 0xFFFFFFFF;
if (details->characters() == 2 && compiler->one_byte()) {
if ((mask & kTwoByteMask) == kTwoByteMask) need_mask = false;
} else if (details->characters() == 1 && !compiler->one_byte()) {
@@ -2554,6 +2577,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
details->positions(characters_filled_in);
RegExpCharacterClass* tree = elm.char_class();
ZoneList<CharacterRange>* ranges = tree->ranges(zone());
+ DCHECK(!ranges->is_empty());
if (tree->is_negated()) {
// A quick check uses multi-character mask and compare. There is no
// useful way to incorporate a negative char class into this scheme
@@ -2716,12 +2740,11 @@ RegExpNode* SeqRegExpNode::FilterSuccessor(int depth) {
return set_replacement(this);
}
-
-// We need to check for the following characters: 0x39c 0x3bc 0x178.
+// We need to check for the following characters: 0x39C 0x3BC 0x178.
static inline bool RangeContainsLatin1Equivalents(CharacterRange range) {
// TODO(dcarney): this could be a lot more efficient.
- return range.Contains(0x39c) ||
- range.Contains(0x3bc) || range.Contains(0x178);
+ return range.Contains(0x039C) || range.Contains(0x03BC) ||
+ range.Contains(0x0178);
}
@@ -2973,7 +2996,7 @@ static void EmitHat(RegExpCompiler* compiler,
new_trace.backtrack())) {
// Newline means \n, \r, 0x2028 or 0x2029.
if (!compiler->one_byte()) {
- assembler->CheckCharacterAfterAnd(0x2028, 0xfffe, &ok);
+ assembler->CheckCharacterAfterAnd(0x2028, 0xFFFE, &ok);
}
assembler->CheckCharacter('\n', &ok);
assembler->CheckNotCharacter('\r', new_trace.backtrack());
@@ -2982,8 +3005,6 @@ static void EmitHat(RegExpCompiler* compiler,
on_success->Emit(compiler, &new_trace);
}
-// More makes code generation slower, less makes V8 benchmark score lower.
-const int kMaxLookaheadForBoyerMoore = 8;
// Emit the code to handle \b and \B (word-boundary or non-word-boundary).
void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
@@ -3253,9 +3274,9 @@ TextNode* TextNode::CreateForCharacterRanges(Zone* zone,
JSRegExp::Flags flags) {
DCHECK_NOT_NULL(ranges);
ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(1, zone);
- elms->Add(
- TextElement::CharClass(new (zone) RegExpCharacterClass(ranges, flags)),
- zone);
+ elms->Add(TextElement::CharClass(
+ new (zone) RegExpCharacterClass(zone, ranges, flags)),
+ zone);
return new (zone) TextNode(elms, read_backward, on_success);
}
@@ -3268,10 +3289,10 @@ TextNode* TextNode::CreateForSurrogatePair(Zone* zone, CharacterRange lead,
ZoneList<CharacterRange>* trail_ranges = CharacterRange::List(zone, trail);
ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(2, zone);
elms->Add(TextElement::CharClass(
- new (zone) RegExpCharacterClass(lead_ranges, flags)),
+ new (zone) RegExpCharacterClass(zone, lead_ranges, flags)),
zone);
elms->Add(TextElement::CharClass(
- new (zone) RegExpCharacterClass(trail_ranges, flags)),
+ new (zone) RegExpCharacterClass(zone, trail_ranges, flags)),
zone);
return new (zone) TextNode(elms, read_backward, on_success);
}
@@ -5089,10 +5110,9 @@ RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
ranges = negated;
}
if (ranges->length() == 0) {
- JSRegExp::Flags default_flags = JSRegExp::Flags();
- ranges->Add(CharacterRange::Everything(), zone);
+ JSRegExp::Flags default_flags;
RegExpCharacterClass* fail =
- new (zone) RegExpCharacterClass(ranges, default_flags, NEGATED);
+ new (zone) RegExpCharacterClass(zone, ranges, default_flags);
return new (zone) TextNode(fail, compiler->read_backward(), on_success);
}
if (standard_type() == '*') {
@@ -5346,8 +5366,8 @@ void RegExpDisjunction::FixSingleCharacterDisjunctions(
if (IsUnicode(flags) && contains_trail_surrogate) {
character_class_flags = RegExpCharacterClass::CONTAINS_SPLIT_SURROGATE;
}
- alternatives->at(write_posn++) =
- new (zone) RegExpCharacterClass(ranges, flags, character_class_flags);
+ alternatives->at(write_posn++) = new (zone)
+ RegExpCharacterClass(zone, ranges, flags, character_class_flags);
} else {
// Just copy any trivial alternatives.
for (int j = first_in_run; j < i; j++) {
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index e45eeeb492..89046a56f3 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -85,8 +85,7 @@ namespace internal {
* bool direct_call = false,
* Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in mips/simulator-mips.h.
+ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
*/
#define __ ACCESS_MASM(masm_)
@@ -509,11 +508,11 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
// One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
__ Branch(&success, eq, current_character(), Operand(' '));
- // Check range 0x09..0x0d.
+ // Check range 0x09..0x0D.
__ Subu(a0, current_character(), Operand('\t'));
__ Branch(&success, ls, a0, Operand('\r' - '\t'));
// \u00a0 (NBSP).
- BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00a0 - '\t'));
+ BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00A0 - '\t'));
__ bind(&success);
return true;
}
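The space-class case above uses a single unsigned range check: subtracting '\t' folds 0x09..0x0D into one compare, leaving ' ' and \u00a0 (NBSP) as explicit equalities. A hedged, self-contained C++ rendering of the same predicate (the function name is illustrative):

    #include <cstdint>

    static bool IsOneByteRegExpSpace(uint8_t c) {
      if (c == ' ') return true;
      const uint32_t d = static_cast<uint32_t>(c) - '\t';
      if (d <= static_cast<uint32_t>('\r' - '\t')) return true;  // 0x09..0x0D
      return d == 0x00A0 - '\t';  // NBSP, reusing the same subtraction
    }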
@@ -532,34 +531,34 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0'));
return true;
case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029).
__ Xor(a0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
- __ Subu(a0, a0, Operand(0x0b));
- BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C.
+ __ Subu(a0, a0, Operand(0x0B));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0C - 0x0B));
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ Subu(a0, a0, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ Subu(a0, a0, Operand(0x2028 - 0x0B));
BranchOrBacktrack(on_no_match, ls, a0, Operand(1));
}
return true;
}
case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029).
__ Xor(a0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
- __ Subu(a0, a0, Operand(0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C.
+ __ Subu(a0, a0, Operand(0x0B));
if (mode_ == LATIN1) {
- BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0c - 0x0b));
+ BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0C - 0x0B));
} else {
Label done;
- BranchOrBacktrack(&done, ls, a0, Operand(0x0c - 0x0b));
+ BranchOrBacktrack(&done, ls, a0, Operand(0x0C - 0x0B));
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ Subu(a0, a0, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ Subu(a0, a0, Operand(0x2028 - 0x0B));
BranchOrBacktrack(on_no_match, hi, a0, Operand(1));
__ bind(&done);
}
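The '.' and 'n' cases here (and in the other ports below) rely on the XOR trick spelled out in the comments: after c ^ 0x01, '\n' and '\r' land on the adjacent values 0x0B and 0x0C, and one more subtraction folds U+2028/U+2029 into a second two-value range. A short standalone sketch, assuming 16-bit characters and an illustrative function name:

    #include <cstdint>

    static bool IsLineTerminatorSketch(uint16_t c) {
      const uint32_t x = static_cast<uint32_t>(c) ^ 0x01;
      // '\n' (0x0A) and '\r' (0x0D) become 0x0B and 0x0C: one range check.
      if (x - 0x0B <= 0x0C - 0x0B) return true;
      // Reuse (x - 0x0B) and check for 0x201D / 0x201E, i.e. U+2028 / U+2029.
      return (x - 0x0B) - (0x2028 - 0x0B) <= 1;
    }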
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 68a7f87843..841b2931fe 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -120,8 +120,7 @@ namespace internal {
* bool direct_call = false,
* Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in mips/simulator-mips.h.
+ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
*
* clang-format on
*/
@@ -540,11 +539,11 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
// One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
__ Branch(&success, eq, current_character(), Operand(' '));
- // Check range 0x09..0x0d.
+ // Check range 0x09..0x0D.
__ Dsubu(a0, current_character(), Operand('\t'));
__ Branch(&success, ls, a0, Operand('\r' - '\t'));
// \u00a0 (NBSP).
- BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00a0 - '\t'));
+ BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00A0 - '\t'));
__ bind(&success);
return true;
}
@@ -563,34 +562,34 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0'));
return true;
case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029).
__ Xor(a0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
- __ Dsubu(a0, a0, Operand(0x0b));
- BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C.
+ __ Dsubu(a0, a0, Operand(0x0B));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0C - 0x0B));
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ Dsubu(a0, a0, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ Dsubu(a0, a0, Operand(0x2028 - 0x0B));
BranchOrBacktrack(on_no_match, ls, a0, Operand(1));
}
return true;
}
case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029).
__ Xor(a0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
- __ Dsubu(a0, a0, Operand(0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C.
+ __ Dsubu(a0, a0, Operand(0x0B));
if (mode_ == LATIN1) {
- BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0c - 0x0b));
+ BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0C - 0x0B));
} else {
Label done;
- BranchOrBacktrack(&done, ls, a0, Operand(0x0c - 0x0b));
+ BranchOrBacktrack(&done, ls, a0, Operand(0x0C - 0x0B));
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ Dsubu(a0, a0, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ Dsubu(a0, a0, Operand(0x2028 - 0x0B));
BranchOrBacktrack(on_no_match, hi, a0, Operand(1));
__ bind(&done);
}
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index bc3e643369..1187fc04b8 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -86,8 +86,7 @@ namespace internal {
* bool direct_call = false,
* Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in ppc/simulator-ppc.h.
+ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
*/
#define __ ACCESS_MASM(masm_)
@@ -522,12 +521,12 @@ bool RegExpMacroAssemblerPPC::CheckSpecialCharacterClass(uc16 type,
Label success;
__ cmpi(current_character(), Operand(' '));
__ beq(&success);
- // Check range 0x09..0x0d
+ // Check range 0x09..0x0D
__ subi(r3, current_character(), Operand('\t'));
__ cmpli(r3, Operand('\r' - '\t'));
__ ble(&success);
// \u00a0 (NBSP).
- __ cmpi(r3, Operand(0x00a0 - '\t'));
+ __ cmpi(r3, Operand(0x00A0 - '\t'));
BranchOrBacktrack(ne, on_no_match);
__ bind(&success);
return true;
@@ -549,37 +548,37 @@ bool RegExpMacroAssemblerPPC::CheckSpecialCharacterClass(uc16 type,
BranchOrBacktrack(le, on_no_match);
return true;
case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ xori(r3, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ subi(r3, r3, Operand(0x0b));
- __ cmpli(r3, Operand(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ subi(r3, r3, Operand(0x0B));
+ __ cmpli(r3, Operand(0x0C - 0x0B));
BranchOrBacktrack(le, on_no_match);
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ subi(r3, r3, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ subi(r3, r3, Operand(0x2028 - 0x0B));
__ cmpli(r3, Operand(1));
BranchOrBacktrack(le, on_no_match);
}
return true;
}
case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ xori(r3, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ subi(r3, r3, Operand(0x0b));
- __ cmpli(r3, Operand(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ subi(r3, r3, Operand(0x0B));
+ __ cmpli(r3, Operand(0x0C - 0x0B));
if (mode_ == LATIN1) {
BranchOrBacktrack(gt, on_no_match);
} else {
Label done;
__ ble(&done);
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ subi(r3, r3, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ subi(r3, r3, Operand(0x2028 - 0x0B));
__ cmpli(r3, Operand(1));
BranchOrBacktrack(gt, on_no_match);
__ bind(&done);
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index e60621f8b6..1a94832f71 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -306,11 +306,17 @@ class RegExpCharacterClass final : public RegExpTree {
typedef base::Flags<Flag> CharacterClassFlags;
RegExpCharacterClass(
- ZoneList<CharacterRange>* ranges, JSRegExp::Flags flags,
+ Zone* zone, ZoneList<CharacterRange>* ranges, JSRegExp::Flags flags,
CharacterClassFlags character_class_flags = CharacterClassFlags())
: set_(ranges),
flags_(flags),
- character_class_flags_(character_class_flags) {}
+ character_class_flags_(character_class_flags) {
+ // Convert the empty set of ranges to the negated Everything() range.
+ if (ranges->is_empty()) {
+ ranges->Add(CharacterRange::Everything(), zone);
+ character_class_flags_ ^= NEGATED;
+ }
+ }
RegExpCharacterClass(uc16 type, JSRegExp::Flags flags)
: set_(type),
flags_(flags),
@@ -352,7 +358,7 @@ class RegExpCharacterClass final : public RegExpTree {
private:
CharacterSet set_;
const JSRegExp::Flags flags_;
- const CharacterClassFlags character_class_flags_;
+ CharacterClassFlags character_class_flags_;
};
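The constructor change above folds the degenerate case into construction: an empty list of ranges and a negated Everything() range describe the same class, which is also why character_class_flags_ loses its const qualifier. A hedged sketch of the equivalence, with illustrative names and an assumed inclusive-range representation:

    #include <cstdint>
    #include <utility>
    #include <vector>

    struct CharClassSketch {
      std::vector<std::pair<uint32_t, uint32_t>> ranges;  // inclusive bounds
      bool negated = false;

      void Canonicalize(uint32_t max_code_point) {
        if (ranges.empty()) {
          ranges.emplace_back(0, max_code_point);  // CharacterRange::Everything()
          negated = !negated;                      // same effect as ^= NEGATED
        }
      }
    };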
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index 600757a72b..af285abcb0 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -286,9 +286,15 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
Address stack_base = stack_scope.stack()->stack_base();
int direct_call = 0;
- int result = CALL_GENERATED_REGEXP_CODE(
- isolate, code->entry(), input, start_offset, input_start, input_end,
- output, output_size, stack_base, direct_call, isolate);
+
+ using RegexpMatcherSig = int(
+ String * input, int start_offset, // NOLINT(readability/casting)
+ const byte* input_start, const byte* input_end, int* output,
+ int output_size, Address stack_base, int direct_call, Isolate* isolate);
+
+ auto fn = GeneratedCode<RegexpMatcherSig>::FromCode(code);
+ int result = fn.Call(input, start_offset, input_start, input_end, output,
+ output_size, stack_base, direct_call, isolate);
DCHECK(result >= RETRY);
if (result == EXCEPTION && !isolate->has_pending_exception()) {
@@ -299,7 +305,7 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
return static_cast<Result>(result);
}
-
+// clang-format off
const byte NativeRegExpMacroAssembler::word_character_map[] = {
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
@@ -308,18 +314,18 @@ const byte NativeRegExpMacroAssembler::word_character_map[] = {
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // '0' - '7'
- 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, // '8' - '9'
-
- 0x00u, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'A' - 'G'
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'H' - 'O'
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'P' - 'W'
- 0xffu, 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0xffu, // 'X' - 'Z', '_'
-
- 0x00u, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'a' - 'g'
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'h' - 'o'
- 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'p' - 'w'
- 0xffu, 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, // 'x' - 'z'
+ 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, // '0' - '7'
+ 0xFFu, 0xFFu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, // '8' - '9'
+
+ 0x00u, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, // 'A' - 'G'
+ 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, // 'H' - 'O'
+ 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, // 'P' - 'W'
+ 0xFFu, 0xFFu, 0xFFu, 0x00u, 0x00u, 0x00u, 0x00u, 0xFFu, // 'X' - 'Z', '_'
+
+ 0x00u, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, // 'a' - 'g'
+ 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, // 'h' - 'o'
+ 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, 0xFFu, // 'p' - 'w'
+ 0xFFu, 0xFFu, 0xFFu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, // 'x' - 'z'
// Latin-1 range
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
@@ -341,7 +347,7 @@ const byte NativeRegExpMacroAssembler::word_character_map[] = {
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
};
-
+// clang-format on
Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
Address* stack_base,
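The Execute() hunk above swaps the CALL_GENERATED_REGEXP_CODE macro for a GeneratedCode wrapper bound to the RegexpMatcherSig signature, so the call site gets a typed Call() instead of a ten-argument macro. A stripped-down sketch of that shape follows; it omits the simulator dispatch the real wrapper handles and uses illustrative names:

    #include <cstdint>

    template <typename Signature>
    class GeneratedCodeSketch;

    template <typename R, typename... Args>
    class GeneratedCodeSketch<R(Args...)> {
     public:
      explicit GeneratedCodeSketch(uintptr_t entry)
          : fn_(reinterpret_cast<R (*)(Args...)>(entry)) {}
      R Call(Args... args) { return fn_(args...); }

     private:
      R (*fn_)(Args...);  // typed view of the generated code entry point
    };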
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 2c6aa5b23a..a7724c5d42 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -280,12 +280,12 @@ RegExpTree* RegExpParser::ParseDisjunction() {
// Everything.
CharacterRange::AddClassEscape('*', ranges, false, zone());
} else {
- // Everything except \x0a, \x0d, \u2028 and \u2029
+ // Everything except \x0A, \x0D, \u2028 and \u2029
CharacterRange::AddClassEscape('.', ranges, false, zone());
}
RegExpCharacterClass* cc =
- new (zone()) RegExpCharacterClass(ranges, builder->flags());
+ new (zone()) RegExpCharacterClass(zone(), ranges, builder->flags());
builder->AddCharacterClass(cc);
break;
}
@@ -332,8 +332,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
new (zone()) ZoneList<CharacterRange>(2, zone());
CharacterRange::AddClassEscape(
c, ranges, unicode() && builder->ignore_case(), zone());
- RegExpCharacterClass* cc =
- new (zone()) RegExpCharacterClass(ranges, builder->flags());
+ RegExpCharacterClass* cc = new (zone())
+ RegExpCharacterClass(zone(), ranges, builder->flags());
builder->AddCharacterClass(cc);
break;
}
@@ -348,8 +348,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
if (!ParsePropertyClass(ranges, p == 'P')) {
return ReportError(CStrVector("Invalid property name"));
}
- RegExpCharacterClass* cc =
- new (zone()) RegExpCharacterClass(ranges, builder->flags());
+ RegExpCharacterClass* cc = new (zone())
+ RegExpCharacterClass(zone(), ranges, builder->flags());
builder->AddCharacterClass(cc);
} else {
// With /u, no identity escapes except for syntax characters
@@ -451,7 +451,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
builder->AddCharacter('\\');
} else {
Advance(2);
- builder->AddCharacter(controlLetter & 0x1f);
+ builder->AddCharacter(controlLetter & 0x1F);
}
break;
}
@@ -1145,7 +1145,7 @@ bool RegExpParser::ParseUnicodeEscape(uc32* value) {
if (current() == '{' && unicode()) {
int start = position();
Advance();
- if (ParseUnlimitedLengthHexNumber(0x10ffff, value)) {
+ if (ParseUnlimitedLengthHexNumber(0x10FFFF, value)) {
if (current() == '}') {
Advance();
return true;
@@ -1255,10 +1255,15 @@ bool LookupSpecialPropertyValueName(const char* name,
ZoneList<CharacterRange>* result,
bool negate, Zone* zone) {
if (NameEquals(name, "Any")) {
- if (!negate) result->Add(CharacterRange::Everything(), zone);
+ if (negate) {
+ // Leave the list of character ranges empty, since the negation of 'Any'
+ // is the empty set.
+ } else {
+ result->Add(CharacterRange::Everything(), zone);
+ }
} else if (NameEquals(name, "ASCII")) {
result->Add(negate ? CharacterRange::Range(0x80, String::kMaxCodePoint)
- : CharacterRange::Range(0x0, 0x7f),
+ : CharacterRange::Range(0x0, 0x7F),
zone);
} else if (NameEquals(name, "Assigned")) {
return LookupPropertyValueName(UCHAR_GENERAL_CATEGORY, "Unassigned",
@@ -1486,8 +1491,8 @@ uc32 RegExpParser::ParseClassCharacterEscape() {
if (letter >= 'A' && letter <= 'Z') {
Advance(2);
// Control letters mapped to ASCII control characters in the range
- // 0x00-0x1f.
- return controlLetter & 0x1f;
+ // 0x00-0x1F.
+ return controlLetter & 0x1F;
}
if (unicode()) {
// With /u, invalid escapes are not treated as identity escapes.
@@ -1497,7 +1502,7 @@ uc32 RegExpParser::ParseClassCharacterEscape() {
if ((controlLetter >= '0' && controlLetter <= '9') ||
controlLetter == '_') {
Advance(2);
- return controlLetter & 0x1f;
+ return controlLetter & 0x1F;
}
// We match JSC in reading the backslash as a literal
// character instead of as starting an escape.
@@ -1672,14 +1677,10 @@ RegExpTree* RegExpParser::ParseCharacterClass(const RegExpBuilder* builder) {
return ReportError(CStrVector(kUnterminated));
}
Advance();
- if (ranges->length() == 0) {
- ranges->Add(CharacterRange::Everything(), zone());
- is_negated = !is_negated;
- }
RegExpCharacterClass::CharacterClassFlags character_class_flags;
if (is_negated) character_class_flags = RegExpCharacterClass::NEGATED;
- return new (zone())
- RegExpCharacterClass(ranges, builder->flags(), character_class_flags);
+ return new (zone()) RegExpCharacterClass(zone(), ranges, builder->flags(),
+ character_class_flags);
}
@@ -1853,7 +1854,8 @@ void RegExpBuilder::AddCharacterClass(RegExpCharacterClass* cc) {
void RegExpBuilder::AddCharacterClassForDesugaring(uc32 c) {
AddTerm(new (zone()) RegExpCharacterClass(
- CharacterRange::List(zone(), CharacterRange::Singleton(c)), flags_));
+ zone(), CharacterRange::List(zone(), CharacterRange::Singleton(c)),
+ flags_));
}
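Both ParseDisjunction and ParseClassCharacterEscape mask the control letter with 0x1F, mapping \cA..\cZ (and, in the Annex B class-escape case, digits and '_') onto the ASCII control range 0x00-0x1F. A tiny self-checking example of that mapping:

    #include <cassert>

    int main() {
      assert(('M' & 0x1F) == 0x0D);          // \cM -> '\r'
      assert(('J' & 0x1F) == 0x0A);          // \cJ -> '\n'
      assert(('a' & 0x1F) == ('A' & 0x1F));  // lower case maps identically
      return 0;
    }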
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index 16427e2933..d483125dd6 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -134,7 +134,7 @@ bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
// TODO(ishell): Update this check once map changes for constant field
// tracking are landing.
-#if defined(DEBUG) || defined(ENABLE_SLOWFAST_SWITCH)
+#ifdef V8_ENABLE_FORCE_SLOW_PATH
if (isolate->force_slow_path()) return false;
#endif
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index fc9548fc78..4f8f234171 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -88,8 +88,7 @@ namespace internal {
* bool direct_call = false,
* Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in s390/simulator-s390.h.
+ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
*/
#define __ ACCESS_MASM(masm_)
@@ -493,12 +492,12 @@ bool RegExpMacroAssemblerS390::CheckSpecialCharacterClass(uc16 type,
Label success;
__ CmpP(current_character(), Operand(' '));
__ beq(&success);
- // Check range 0x09..0x0d
+ // Check range 0x09..0x0D
__ SubP(r2, current_character(), Operand('\t'));
__ CmpLogicalP(r2, Operand('\r' - '\t'));
__ ble(&success);
// \u00a0 (NBSP).
- __ CmpLogicalP(r2, Operand(0x00a0 - '\t'));
+ __ CmpLogicalP(r2, Operand(0x00A0 - '\t'));
BranchOrBacktrack(ne, on_no_match);
__ bind(&success);
return true;
@@ -520,37 +519,37 @@ bool RegExpMacroAssemblerS390::CheckSpecialCharacterClass(uc16 type,
BranchOrBacktrack(le, on_no_match);
return true;
case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ XorP(r2, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ SubP(r2, Operand(0x0b));
- __ CmpLogicalP(r2, Operand(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ SubP(r2, Operand(0x0B));
+ __ CmpLogicalP(r2, Operand(0x0C - 0x0B));
BranchOrBacktrack(le, on_no_match);
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ SubP(r2, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ SubP(r2, Operand(0x2028 - 0x0B));
__ CmpLogicalP(r2, Operand(1));
BranchOrBacktrack(le, on_no_match);
}
return true;
}
case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ XorP(r2, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ SubP(r2, Operand(0x0b));
- __ CmpLogicalP(r2, Operand(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ SubP(r2, Operand(0x0B));
+ __ CmpLogicalP(r2, Operand(0x0C - 0x0B));
if (mode_ == LATIN1) {
BranchOrBacktrack(gt, on_no_match);
} else {
Label done;
__ ble(&done);
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ SubP(r2, Operand(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ SubP(r2, Operand(0x2028 - 0x0B));
__ CmpLogicalP(r2, Operand(1));
BranchOrBacktrack(gt, on_no_match);
__ bind(&done);
@@ -773,7 +772,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// and the following use of that register.
__ lay(r2, MemOperand(r2, num_saved_registers_ * kIntSize));
for (int i = 0; i < num_saved_registers_;) {
- if (false && i < num_saved_registers_ - 4) {
+ if ((false) && i < num_saved_registers_ - 4) {
// TODO(john.yan): Can be optimized by SIMD instructions
__ LoadMultipleP(r3, r6, register_location(i + 3));
if (mode_ == UC16) {
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 1e21182c35..eb57b29602 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -551,12 +551,12 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
Label success;
__ cmpl(current_character(), Immediate(' '));
__ j(equal, &success, Label::kNear);
- // Check range 0x09..0x0d
+ // Check range 0x09..0x0D
__ leap(rax, Operand(current_character(), -'\t'));
__ cmpl(rax, Immediate('\r' - '\t'));
__ j(below_equal, &success, Label::kNear);
// \u00a0 (NBSP).
- __ cmpl(rax, Immediate(0x00a0 - '\t'));
+ __ cmpl(rax, Immediate(0x00A0 - '\t'));
BranchOrBacktrack(not_equal, on_no_match);
__ bind(&success);
return true;
@@ -578,39 +578,39 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
BranchOrBacktrack(below_equal, on_no_match);
return true;
case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
__ xorp(rax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ subl(rax, Immediate(0x0b));
- __ cmpl(rax, Immediate(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ subl(rax, Immediate(0x0B));
+ __ cmpl(rax, Immediate(0x0C - 0x0B));
BranchOrBacktrack(below_equal, on_no_match);
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ subl(rax, Immediate(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ subl(rax, Immediate(0x2028 - 0x0B));
__ cmpl(rax, Immediate(0x2029 - 0x2028));
BranchOrBacktrack(below_equal, on_no_match);
}
return true;
}
case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
__ xorp(rax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ subl(rax, Immediate(0x0b));
- __ cmpl(rax, Immediate(0x0c - 0x0b));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
+ __ subl(rax, Immediate(0x0B));
+ __ cmpl(rax, Immediate(0x0C - 0x0B));
if (mode_ == LATIN1) {
BranchOrBacktrack(above, on_no_match);
} else {
Label done;
BranchOrBacktrack(below_equal, &done);
// Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ subl(rax, Immediate(0x2028 - 0x0b));
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ subl(rax, Immediate(0x2028 - 0x0B));
__ cmpl(rax, Immediate(0x2029 - 0x2028));
BranchOrBacktrack(above, on_no_match);
__ bind(&done);
diff --git a/deps/v8/src/runtime/runtime-bigint.cc b/deps/v8/src/runtime/runtime-bigint.cc
index ce513d2f92..47f644f619 100644
--- a/deps/v8/src/runtime/runtime-bigint.cc
+++ b/deps/v8/src/runtime/runtime-bigint.cc
@@ -57,7 +57,6 @@ RUNTIME_FUNCTION(Runtime_BigIntEqualToString) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 0);
CONVERT_ARG_HANDLE_CHECKED(String, rhs, 1);
- rhs = String::Flatten(rhs);
bool result = BigInt::EqualToString(lhs, rhs);
return *isolate->factory()->ToBoolean(result);
}
@@ -108,7 +107,7 @@ RUNTIME_FUNCTION(Runtime_BigIntBinaryOp) {
result = BigInt::Remainder(left, right);
break;
case Operation::kExponentiate:
- UNIMPLEMENTED();
+ result = BigInt::Exponentiate(left, right);
break;
case Operation::kBitwiseAnd:
result = BigInt::BitwiseAnd(left, right);
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 37e647c7dd..7869e32dd1 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -339,7 +339,6 @@ bool AddDescriptorsByTemplate(
map->InitializeDescriptors(*descriptors,
LayoutDescriptor::FastPointerLayout());
-
if (elements_dictionary->NumberOfElements() > 0) {
if (!SubstituteValues<NumberDictionary>(isolate, elements_dictionary,
receiver, args)) {
@@ -454,7 +453,6 @@ bool InitClassPrototype(Isolate* isolate,
Map::SetPrototype(map, prototype_parent);
constructor->set_prototype_or_initial_map(*prototype);
map->SetConstructor(*constructor);
-
Handle<FixedArray> computed_properties(
class_boilerplate->instance_computed_properties(), isolate);
Handle<NumberDictionary> elements_dictionary_template(
@@ -467,8 +465,8 @@ bool InitClassPrototype(Isolate* isolate,
Handle<NameDictionary> properties_dictionary_template =
Handle<NameDictionary>::cast(properties_template);
- map->set_dictionary_map(true);
- map->set_migration_target(false);
+ map->set_is_dictionary_map(true);
+ map->set_is_migration_target(false);
map->set_may_have_interesting_symbols(true);
map->set_construction_counter(Map::kNoSlackTracking);
@@ -519,10 +517,10 @@ bool InitClassConstructor(Isolate* isolate,
Handle<NameDictionary> properties_dictionary_template =
Handle<NameDictionary>::cast(properties_template);
- map->set_dictionary_map(true);
+ map->set_is_dictionary_map(true);
map->InitializeDescriptors(isolate->heap()->empty_descriptor_array(),
LayoutDescriptor::FastPointerLayout());
- map->set_migration_target(false);
+ map->set_is_migration_target(false);
map->set_may_have_interesting_symbols(true);
map->set_construction_counter(Map::kNoSlackTracking);
@@ -595,6 +593,14 @@ MaybeHandle<Object> DefineClass(Isolate* isolate,
DCHECK(isolate->has_pending_exception());
return MaybeHandle<Object>();
}
+ if (FLAG_trace_maps) {
+ LOG(isolate,
+ MapEvent("InitialMap", nullptr, constructor->map(),
+ "init class constructor", constructor->shared()->DebugName()));
+ LOG(isolate, MapEvent("InitialMap", nullptr, prototype->map(),
+ "init class prototype"));
+ }
+
return prototype;
}
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 92ba3e6c3f..14b61b0ac6 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -57,6 +57,24 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized_Concurrent) {
return function->code();
}
+RUNTIME_FUNCTION(Runtime_FunctionFirstExecution) {
+ HandleScope scope(isolate);
+ StackLimitCheck check(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ DCHECK_EQ(function->feedback_vector()->optimization_marker(),
+ OptimizationMarker::kLogFirstExecution);
+ DCHECK(FLAG_log_function_events);
+ Handle<SharedFunctionInfo> sfi(function->shared());
+ LOG(isolate, FunctionEvent("first-execution", Script::cast(sfi->script()), -1,
+ 0, sfi->start_position(), sfi->end_position(),
+ sfi->DebugName()));
+ function->feedback_vector()->ClearOptimizationMarker();
+ // Return the code to continue execution; we don't care at this point whether
+ // this is for lazy compilation or has been eagerly compiled.
+ return function->code();
+}
RUNTIME_FUNCTION(Runtime_CompileOptimized_NotConcurrent) {
HandleScope scope(isolate);
@@ -141,7 +159,6 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
Handle<JSFunction> function = deoptimizer->function();
Deoptimizer::BailoutType type = deoptimizer->bailout_type();
- bool preserve_optimized_code = deoptimizer->preserve_optimized();
// TODO(turbofan): We currently need the native context to materialize
// the arguments object, but only to get to its map.
@@ -157,7 +174,7 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
isolate->set_context(Context::cast(top_frame->context()));
// Invalidate the underlying optimized code on non-lazy deopts.
- if (type != Deoptimizer::LAZY && !preserve_optimized_code) {
+ if (type != Deoptimizer::LAZY) {
Deoptimizer::DeoptimizeFunction(*function);
}
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index d7395c7a7f..d6e028b41e 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -1554,6 +1554,7 @@ int ScriptLinePosition(Handle<Script> script, int line) {
if (script->type() == Script::TYPE_WASM) {
return WasmCompiledModule::cast(script->wasm_compiled_module())
+ ->shared()
->GetFunctionOffset(line);
}
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index c78ac8f6b1..e9433d2041 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -63,7 +63,9 @@ RUNTIME_FUNCTION(Runtime_FunctionGetSourceCode) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
if (function->IsJSFunction()) {
- return *Handle<JSFunction>::cast(function)->shared()->GetSourceCode();
+ Handle<SharedFunctionInfo> shared(
+ Handle<JSFunction>::cast(function)->shared());
+ return *SharedFunctionInfo::GetSourceCode(shared);
}
return isolate->heap()->undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 6d0e2b8439..f9e9375543 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -6,6 +6,7 @@
#include <memory>
+#include "src/api.h"
#include "src/arguments.h"
#include "src/ast/prettyprinter.h"
#include "src/bootstrapper.h"
@@ -650,5 +651,22 @@ RUNTIME_FUNCTION(Runtime_GetTemplateObject) {
description, isolate->native_context());
}
+RUNTIME_FUNCTION(Runtime_ReportMessage) {
+ // Helper to report messages and continue JS execution. This is intended to
+ // behave similarly to reporting exceptions which reach the top-level in
+ // Execution.cc, but allow the JS code to continue. This is useful for
+ // implementing algorithms such as RunMicrotasks in JS.
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(Object, message_obj, 0);
+
+ DCHECK(!isolate->has_pending_exception());
+ isolate->set_pending_exception(*message_obj);
+ isolate->ReportPendingMessagesFromJavaScript();
+ isolate->clear_pending_exception();
+ return isolate->heap()->undefined_value();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
index bb16a772c0..a9fb48f887 100644
--- a/deps/v8/src/runtime/runtime-module.cc
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -19,11 +19,9 @@ RUNTIME_FUNCTION(Runtime_DynamicImportCall) {
Handle<Script> script(Script::cast(function->shared()->script()));
- while (script->eval_from_shared()->IsSharedFunctionInfo()) {
- script = handle(
- Script::cast(
- SharedFunctionInfo::cast(script->eval_from_shared())->script()),
- isolate);
+ while (script->has_eval_from_shared()) {
+ script =
+ handle(Script::cast(script->eval_from_shared()->script()), isolate);
}
RETURN_RESULT_OR_FAILURE(
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index 8e351b3c74..260e6be45b 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -69,16 +69,6 @@ RUNTIME_FUNCTION(Runtime_StringParseFloat) {
return *isolate->factory()->NewNumber(value);
}
-
-RUNTIME_FUNCTION(Runtime_NumberToString) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0);
-
- return *isolate->factory()->NumberToString(number);
-}
-
-
RUNTIME_FUNCTION(Runtime_NumberToStringSkipCache) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 057ead9407..379472bdbe 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -439,6 +439,61 @@ RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
return *object;
}
+RUNTIME_FUNCTION(Runtime_ObjectValues) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+
+ Handle<FixedArray> values;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, values,
+ JSReceiver::GetOwnValues(receiver, PropertyFilter::ENUMERABLE_STRINGS,
+ true));
+ return *isolate->factory()->NewJSArrayWithElements(values);
+}
+
+RUNTIME_FUNCTION(Runtime_ObjectValuesSkipFastPath) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+
+ Handle<FixedArray> value;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, value,
+ JSReceiver::GetOwnValues(receiver, PropertyFilter::ENUMERABLE_STRINGS,
+ false));
+ return *isolate->factory()->NewJSArrayWithElements(value);
+}
+
+RUNTIME_FUNCTION(Runtime_ObjectEntries) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+
+ Handle<FixedArray> entries;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, entries,
+ JSReceiver::GetOwnEntries(receiver, PropertyFilter::ENUMERABLE_STRINGS,
+ true));
+ return *isolate->factory()->NewJSArrayWithElements(entries);
+}
+
+RUNTIME_FUNCTION(Runtime_ObjectEntriesSkipFastPath) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+
+ Handle<FixedArray> entries;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, entries,
+ JSReceiver::GetOwnEntries(receiver, PropertyFilter::ENUMERABLE_STRINGS,
+ false));
+ return *isolate->factory()->NewJSArrayWithElements(entries);
+}
RUNTIME_FUNCTION(Runtime_GetProperty) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/runtime/runtime-promise.cc b/deps/v8/src/runtime/runtime-promise.cc
index 1d8ca623e1..2c28cd3c98 100644
--- a/deps/v8/src/runtime/runtime-promise.cc
+++ b/deps/v8/src/runtime/runtime-promise.cc
@@ -70,22 +70,6 @@ RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_EnqueuePromiseReactionJob) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(PromiseReactionJobInfo, info, 0);
- isolate->EnqueueMicrotask(info);
- return isolate->heap()->undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_EnqueuePromiseResolveThenableJob) {
- HandleScope scope(isolate);
- DCHECK_EQ(args.length(), 1);
- CONVERT_ARG_HANDLE_CHECKED(PromiseResolveThenableJobInfo, info, 0);
- isolate->EnqueueMicrotask(info);
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index 472cbdf79d..a10260c1e2 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -38,14 +38,6 @@ RUNTIME_FUNCTION(Runtime_JSProxyGetTarget) {
}
-RUNTIME_FUNCTION(Runtime_JSProxyRevoke) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSProxy, proxy, 0);
- JSProxy::Revoke(proxy);
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_GetPropertyWithReceiver) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 2ba760b847..d0afcd2636 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -544,7 +544,7 @@ MUST_USE_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
int pattern_len = pattern->length();
int replacement_len = replacement->length();
- FindStringIndicesDispatch(isolate, *subject, pattern, indices, 0xffffffff);
+ FindStringIndicesDispatch(isolate, *subject, pattern, indices, 0xFFFFFFFF);
if (indices->empty()) return *subject;
@@ -834,7 +834,7 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
int pattern_length = pattern->length();
CHECK_LT(0, pattern_length);
- if (limit == 0xffffffffu) {
+ if (limit == 0xFFFFFFFFu) {
FixedArray* last_match_cache_unused;
Handle<Object> cached_answer(
RegExpResultsCache::Lookup(isolate->heap(), *subject, *pattern,
@@ -849,7 +849,7 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
}
}
- // The limit can be very large (0xffffffffu), but since the pattern
+ // The limit can be very large (0xFFFFFFFFu), but since the pattern
// isn't empty, we can never create more parts than ~half the length
// of the subject.
@@ -890,7 +890,7 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
});
}
- if (limit == 0xffffffffu) {
+ if (limit == 0xFFFFFFFFu) {
if (result->HasObjectElements()) {
RegExpResultsCache::Enter(isolate, subject, pattern, elements,
isolate->factory()->empty_fixed_array(),
@@ -1804,6 +1804,7 @@ RUNTIME_FUNCTION(Runtime_RegExpReplace) {
uint32_t next_source_position = 0;
for (const auto& result : results) {
+ HandleScope handle_scope(isolate);
Handle<Object> captures_length_obj;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, captures_length_obj,
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 61795fc6cb..76f291f90f 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -9,6 +9,7 @@
#include "src/accessors.h"
#include "src/arguments.h"
#include "src/ast/scopes.h"
+#include "src/bootstrapper.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
@@ -726,6 +727,9 @@ RUNTIME_FUNCTION(Runtime_NewScriptContext) {
Handle<JSFunction> closure(function->shared()->IsUserJavaScript()
? native_context->closure()
: *function);
+
+ // We do not need script contexts here during bootstrap.
+ DCHECK(!isolate->bootstrapper()->IsActive());
Handle<Context> result =
isolate->factory()->NewScriptContext(closure, scope_info);
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 1e2d1f5a56..8f6b887f62 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -490,7 +490,7 @@ static void JoinSparseArrayWithSeparator(FixedArray* elements,
int last_array_index = static_cast<int>(array_length - 1);
// Array length must be representable as a signed 32-bit number,
// otherwise the total string length would have been too large.
- DCHECK_LE(array_length, 0x7fffffff); // Is int32_t.
+ DCHECK_LE(array_length, 0x7FFFFFFF); // Is int32_t.
int repeat = last_array_index - previous_separator_position;
WriteRepeatToFlat<Char>(separator, buffer, cursor, repeat, separator_length);
cursor += repeat * separator_length;
@@ -537,7 +537,7 @@ RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
int separator_length = separator->length();
if (!overflow && separator_length > 0) {
- if (array_length <= 0x7fffffffu) {
+ if (array_length <= 0x7FFFFFFFu) {
int separator_count = static_cast<int>(array_length) - 1;
int remaining_length = String::kMaxLength - string_length;
if ((remaining_length / separator_length) >= separator_count) {
@@ -549,7 +549,7 @@ RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
} else {
// Nonempty separator and at least 2^31-1 separators necessary
// means that the string is too large to create.
- STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
+ STATIC_ASSERT(String::kMaxLength < 0x7FFFFFFF);
overflow = true;
}
}
@@ -730,7 +730,7 @@ RUNTIME_FUNCTION(Runtime_StringCharFromCode) {
DCHECK_EQ(1, args.length());
if (args[0]->IsNumber()) {
CONVERT_NUMBER_CHECKED(uint32_t, code, Uint32, args[0]);
- code &= 0xffff;
+ code &= 0xFFFF;
return *isolate->factory()->LookupSingleCharacterStringFromCode(code);
}
return isolate->heap()->empty_string();
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index b3cdf3fe67..01e2b198a6 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -15,10 +15,11 @@
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
#include "src/runtime-profiler.h"
-#include "src/snapshot/code-serializer.h"
#include "src/snapshot/natives.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/memory-tracing.h"
+#include "src/wasm/module-compiler.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-serialization.h"
@@ -41,8 +42,9 @@ bool IsWasmCompileAllowed(v8::Isolate* isolate, v8::Local<v8::Value> value,
DCHECK_GT(g_PerIsolateWasmControls.Get().count(isolate), 0);
const WasmCompileControls& ctrls = g_PerIsolateWasmControls.Get().at(isolate);
return (is_async && ctrls.AllowAnySizeForAsync) ||
- (v8::Local<v8::ArrayBuffer>::Cast(value)->ByteLength() <=
- ctrls.MaxWasmBufferSize);
+ (value->IsArrayBuffer() &&
+ v8::Local<v8::ArrayBuffer>::Cast(value)->ByteLength() <=
+ ctrls.MaxWasmBufferSize);
}
// Use the compile controls for instantiation, too
@@ -307,7 +309,8 @@ RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
return isolate->heap()->undefined_value();
}
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
- function->shared()->DisableOptimization(kOptimizationDisabledForTest);
+ function->shared()->DisableOptimization(
+ BailoutReason::kOptimizationDisabledForTest);
return isolate->heap()->undefined_value();
}
@@ -499,8 +502,8 @@ RUNTIME_FUNCTION(Runtime_CheckWasmWrapperElision) {
: rinfo->target_address();
if (FLAG_wasm_jit_to_native) {
wasm::WasmCode* target =
- isolate->wasm_code_manager()->LookupCode(target_address);
- if (target->kind() == wasm::WasmCode::Function) {
+ isolate->wasm_engine()->code_manager()->LookupCode(target_address);
+ if (target->kind() == wasm::WasmCode::kFunction) {
++count;
export_fct = target;
}
@@ -523,8 +526,8 @@ RUNTIME_FUNCTION(Runtime_CheckWasmWrapperElision) {
RelocInfo* rinfo = it.rinfo();
Address target_address = rinfo->target_address();
wasm::WasmCode* target =
- isolate->wasm_code_manager()->LookupCode(target_address);
- if (target->kind() == wasm::WasmCode::Function) {
+ isolate->wasm_engine()->code_manager()->LookupCode(target_address);
+ if (target->kind() == wasm::WasmCode::kFunction) {
++count;
intermediate_fct = target;
}
@@ -549,8 +552,8 @@ RUNTIME_FUNCTION(Runtime_CheckWasmWrapperElision) {
count = 0;
if (FLAG_wasm_jit_to_native) {
wasm::WasmCode::Kind target_kind = type->value() == 0
- ? wasm::WasmCode::WasmToWasmWrapper
- : wasm::WasmCode::WasmToJsWrapper;
+ ? wasm::WasmCode::kWasmToWasmWrapper
+ : wasm::WasmCode::kWasmToJsWrapper;
for (RelocIterator it(intermediate_fct->instructions(),
intermediate_fct->reloc_info(),
intermediate_fct->constant_pool(),
@@ -559,7 +562,7 @@ RUNTIME_FUNCTION(Runtime_CheckWasmWrapperElision) {
RelocInfo* rinfo = it.rinfo();
Address target_address = rinfo->target_address();
wasm::WasmCode* target =
- isolate->wasm_code_manager()->LookupCode(target_address);
+ isolate->wasm_engine()->code_manager()->LookupCode(target_address);
if (target->kind() == target_kind) {
++count;
}
@@ -614,10 +617,12 @@ RUNTIME_FUNCTION(Runtime_NotifyContextDisposed) {
RUNTIME_FUNCTION(Runtime_SetAllocationTimeout) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2 || args.length() == 3);
-#ifdef DEBUG
- CONVERT_INT32_ARG_CHECKED(interval, 0);
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
CONVERT_INT32_ARG_CHECKED(timeout, 1);
isolate->heap()->set_allocation_timeout(timeout);
+#endif
+#ifdef DEBUG
+ CONVERT_INT32_ARG_CHECKED(interval, 0);
FLAG_gc_interval = interval;
if (args.length() == 3) {
// Enable/disable inline allocation if requested.
@@ -757,8 +762,7 @@ RUNTIME_FUNCTION(Runtime_Abort) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(message_id, 0);
- const char* message =
- GetBailoutReason(static_cast<BailoutReason>(message_id));
+ const char* message = GetAbortReason(static_cast<AbortReason>(message_id));
base::OS::PrintError("abort: %s\n", message);
isolate->PrintStack(stderr);
base::OS::Abort();
@@ -936,8 +940,7 @@ RUNTIME_FUNCTION(Runtime_IsWasmCode) {
RUNTIME_FUNCTION(Runtime_IsWasmTrapHandlerEnabled) {
DisallowHeapAllocation no_gc;
DCHECK_EQ(0, args.length());
- bool is_enabled = trap_handler::UseTrapHandler();
- return isolate->heap()->ToBoolean(is_enabled);
+ return isolate->heap()->ToBoolean(trap_handler::IsTrapHandlerEnabled());
}
RUNTIME_FUNCTION(Runtime_GetWasmRecoveredTrapCount) {
@@ -993,24 +996,14 @@ RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
Handle<WasmCompiledModule> orig(module_obj->compiled_module());
- if (FLAG_wasm_jit_to_native) {
- std::pair<std::unique_ptr<byte[]>, size_t> serialized_module =
- wasm::NativeModuleSerializer::SerializeWholeModule(isolate, orig);
- int data_size = static_cast<int>(serialized_module.second);
- void* buff = isolate->array_buffer_allocator()->Allocate(data_size);
- Handle<JSArrayBuffer> ret = isolate->factory()->NewJSArrayBuffer();
- JSArrayBuffer::Setup(ret, isolate, false, buff, data_size);
- memcpy(buff, serialized_module.first.get(), data_size);
- return *ret;
- } else {
- std::unique_ptr<ScriptData> data =
- WasmCompiledModuleSerializer::SerializeWasmModule(isolate, orig);
- void* buff = isolate->array_buffer_allocator()->Allocate(data->length());
- Handle<JSArrayBuffer> ret = isolate->factory()->NewJSArrayBuffer();
- JSArrayBuffer::Setup(ret, isolate, false, buff, data->length());
- memcpy(buff, data->data(), data->length());
- return *ret;
- }
+ std::pair<std::unique_ptr<const byte[]>, size_t> serialized_module =
+ wasm::SerializeNativeModule(isolate, orig);
+ int data_size = static_cast<int>(serialized_module.second);
+ void* buff = isolate->array_buffer_allocator()->Allocate(data_size);
+ Handle<JSArrayBuffer> ret = isolate->factory()->NewJSArrayBuffer();
+ JSArrayBuffer::Setup(ret, isolate, false, buff, data_size);
+ memcpy(buff, serialized_module.first.get(), data_size);
+ return *ret;
}
// Take an array buffer and attempt to reconstruct a compiled wasm module.
@@ -1024,39 +1017,28 @@ RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
Address mem_start = static_cast<Address>(buffer->backing_store());
size_t mem_size = static_cast<size_t>(buffer->byte_length()->Number());
- // DeserializeWasmModule will allocate. We assume JSArrayBuffer doesn't
- // get relocated.
+ // Note that {wasm::DeserializeNativeModule} will allocate. We assume the
+ // JSArrayBuffer doesn't get relocated.
bool already_external = wire_bytes->is_external();
if (!already_external) {
wire_bytes->set_is_external(true);
isolate->heap()->UnregisterArrayBuffer(*wire_bytes);
}
- MaybeHandle<FixedArray> maybe_compiled_module;
- if (FLAG_wasm_jit_to_native) {
- maybe_compiled_module =
- wasm::NativeModuleDeserializer::DeserializeFullBuffer(
- isolate, {mem_start, mem_size},
- Vector<const uint8_t>(
- reinterpret_cast<uint8_t*>(wire_bytes->backing_store()),
- static_cast<int>(wire_bytes->byte_length()->Number())));
- } else {
- ScriptData sc(mem_start, static_cast<int>(mem_size));
- maybe_compiled_module = WasmCompiledModuleSerializer::DeserializeWasmModule(
- isolate, &sc,
- Vector<const uint8_t>(
- reinterpret_cast<uint8_t*>(wire_bytes->backing_store()),
- static_cast<int>(wire_bytes->byte_length()->Number())));
- }
+ MaybeHandle<WasmCompiledModule> maybe_compiled_module =
+ wasm::DeserializeNativeModule(
+ isolate, {mem_start, mem_size},
+ Vector<const uint8_t>(
+ reinterpret_cast<uint8_t*>(wire_bytes->backing_store()),
+ static_cast<int>(wire_bytes->byte_length()->Number())));
if (!already_external) {
wire_bytes->set_is_external(false);
isolate->heap()->RegisterNewArrayBuffer(*wire_bytes);
}
- Handle<FixedArray> compiled_module;
+ Handle<WasmCompiledModule> compiled_module;
if (!maybe_compiled_module.ToHandle(&compiled_module)) {
return isolate->heap()->undefined_value();
}
- return *WasmModuleObject::New(
- isolate, Handle<WasmCompiledModule>::cast(compiled_module));
+ return *WasmModuleObject::New(isolate, compiled_module);
}
RUNTIME_FUNCTION(Runtime_ValidateWasmInstancesChain) {
@@ -1125,11 +1107,11 @@ RUNTIME_FUNCTION(Runtime_RedirectToWasmInterpreter) {
RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
HandleScope hs(isolate);
- DCHECK_EQ(4, args.length());
- CONVERT_SMI_ARG_CHECKED(is_store, 0);
- CONVERT_SMI_ARG_CHECKED(mem_rep, 1);
- CONVERT_SMI_ARG_CHECKED(addr_low, 2);
- CONVERT_SMI_ARG_CHECKED(addr_high, 3);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Smi, info_addr, 0);
+
+ wasm::MemoryTracingInfo* info =
+ reinterpret_cast<wasm::MemoryTracingInfo*>(info_addr);
// Find the caller wasm frame.
StackTraceFrameIterator it(isolate);
@@ -1137,8 +1119,6 @@ RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
DCHECK(it.is_wasm());
WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame());
- uint32_t addr = (static_cast<uint32_t>(addr_low) & 0xffff) |
- (static_cast<uint32_t>(addr_high) << 16);
uint8_t* mem_start = reinterpret_cast<uint8_t*>(frame->wasm_instance()
->memory_object()
->array_buffer()
@@ -1148,9 +1128,11 @@ RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
// TODO(titzer): eliminate dependency on WasmModule definition here.
int func_start =
frame->wasm_instance()->module()->functions[func_index].code.offset();
- tracing::TraceMemoryOperation(tracing::kWasmCompiled, is_store,
- MachineRepresentation(mem_rep), addr,
- func_index, pos - func_start, mem_start);
+ wasm::ExecutionEngine eng = frame->wasm_code().is_liftoff()
+ ? wasm::ExecutionEngine::kLiftoff
+ : wasm::ExecutionEngine::kTurbofan;
+ wasm::TraceMemoryOperation(eng, info, func_index, pos - func_start,
+ mem_start);
return isolate->heap()->undefined_value();
}
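The Runtime_WasmTraceMemory hunk above replaces four Smi arguments with a single pointer to a wasm::MemoryTracingInfo, so the traced address no longer has to be split into 16-bit halves and reassembled in the runtime function. The deleted reassembly, shown as a hedged standalone helper with an illustrative name:

    #include <cstdint>

    // Mirrors the removed (addr_low & 0xffff) | (addr_high << 16) computation.
    static uint32_t ReassembleTracedAddress(uint32_t addr_low, uint32_t addr_high) {
      return (addr_low & 0xFFFF) | (addr_high << 16);
    }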
@@ -1180,5 +1162,19 @@ RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTracking) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_FreezeWasmLazyCompilation) {
+ DCHECK_EQ(1, args.length());
+ DisallowHeapAllocation no_gc;
+ CONVERT_ARG_CHECKED(WasmInstanceObject, instance, 0);
+
+ WasmSharedModuleData* shared = instance->compiled_module()->shared();
+ CHECK(shared->has_lazy_compilation_orchestrator());
+ auto* orchestrator = Managed<wasm::LazyCompilationOrchestrator>::cast(
+ shared->lazy_compilation_orchestrator())
+ ->get();
+ orchestrator->FreezeLazyCompilationForTesting();
+ return isolate->heap()->undefined_value();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index 5820c4b6a4..85fb2d2173 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -214,101 +214,6 @@ RUNTIME_FUNCTION(Runtime_TypedArraySpeciesCreateByLength) {
return *result_array;
}
-namespace {
-
-Object* TypedArraySetFromOverlapping(Isolate* isolate,
- Handle<JSTypedArray> target,
- Handle<JSTypedArray> source,
- uint32_t offset) {
-#ifdef DEBUG
- Handle<FixedTypedArrayBase> source_elements(
- FixedTypedArrayBase::cast(source->elements()));
- Handle<FixedTypedArrayBase> target_elements(
- FixedTypedArrayBase::cast(target->elements()));
- uint8_t* source_data = static_cast<uint8_t*>(source_elements->DataPtr());
- uint8_t* target_data = static_cast<uint8_t*>(target_elements->DataPtr());
- size_t source_byte_length = NumberToSize(source->byte_length());
- size_t target_byte_length = NumberToSize(target->byte_length());
-
- CHECK_LE(offset, target->length_value());
- CHECK_LE(source->length_value(), target->length_value() - offset);
- CHECK(source->length()->IsSmi());
-
- CHECK(!target->WasNeutered());
- CHECK(!source->WasNeutered());
-
- // Assert that target and source in fact overlapping.
- CHECK(target_data + target_byte_length > source_data &&
- source_data + source_byte_length > target_data);
-#endif
-
- size_t sourceElementSize = source->element_size();
- size_t targetElementSize = target->element_size();
-
- uint32_t source_length = source->length_value();
- if (source_length == 0) return isolate->heap()->undefined_value();
-
- // Copy left part.
-
- // First un-mutated byte after the next write
- uint32_t target_ptr = 0;
- CHECK(target->byte_offset()->ToUint32(&target_ptr));
- target_ptr += (offset + 1) * targetElementSize;
-
- // Next read at sourcePtr. We do not care for memory changing before
- // sourcePtr - we have already copied it.
- uint32_t source_ptr = 0;
- CHECK(source->byte_offset()->ToUint32(&source_ptr));
-
- ElementsAccessor* source_accessor = source->GetElementsAccessor();
- ElementsAccessor* target_accessor = target->GetElementsAccessor();
-
- uint32_t left_index;
- for (left_index = 0; left_index < source_length && target_ptr <= source_ptr;
- left_index++) {
- Handle<Object> value = source_accessor->Get(source, left_index);
- target_accessor->Set(target, offset + left_index, *value);
-
- target_ptr += targetElementSize;
- source_ptr += sourceElementSize;
- }
-
- // Copy right part;
- // First unmutated byte before the next write
- CHECK(target->byte_offset()->ToUint32(&target_ptr));
- target_ptr += (offset + source_length - 1) * targetElementSize;
-
- // Next read before sourcePtr. We do not care for memory changing after
- // sourcePtr - we have already copied it.
- CHECK(target->byte_offset()->ToUint32(&source_ptr));
- source_ptr += source_length * sourceElementSize;
-
- uint32_t right_index;
- DCHECK_GE(source_length, 1);
- for (right_index = source_length - 1;
- right_index > left_index && target_ptr >= source_ptr; right_index--) {
- Handle<Object> value = source_accessor->Get(source, right_index);
- target_accessor->Set(target, offset + right_index, *value);
-
- target_ptr -= targetElementSize;
- source_ptr -= sourceElementSize;
- }
-
- std::vector<Handle<Object>> temp(right_index + 1 - left_index);
-
- for (uint32_t i = left_index; i <= right_index; i++) {
- temp[i - left_index] = source_accessor->Get(source, i);
- }
-
- for (uint32_t i = left_index; i <= right_index; i++) {
- target_accessor->Set(target, offset + i, *temp[i - left_index]);
- }
-
- return isolate->heap()->undefined_value();
-}
-
-} // namespace
-
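The helper deleted above handled the case where source and target typed arrays alias
the same backing store: copy a left run forward, a right run backward, and stage the
middle through a temporary. The underlying technique is simpler than the code
suggests: when element types match, an overlap-safe copy is just memmove; when a
per-element conversion is needed, staging through a disjoint scratch buffer is always
correct. A small self-contained sketch with illustrative element types (not V8 API):

    #include <cstddef>
    #include <cstring>
    #include <vector>

    // Overlap-safe element-wise copy with conversion: convert into a scratch
    // buffer first, then write the converted block back in one step.
    void SetFromOverlapping(double* target, const float* source, std::size_t count) {
      std::vector<double> scratch(count);
      for (std::size_t i = 0; i < count; ++i) {
        scratch[i] = static_cast<double>(source[i]);
      }
      std::memcpy(target, scratch.data(), count * sizeof(double));
    }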
// 22.2.3.23 %TypedArray%.prototype.set ( overloaded [ , offset ] )
RUNTIME_FUNCTION(Runtime_TypedArraySet) {
HandleScope scope(isolate);
@@ -317,6 +222,7 @@ RUNTIME_FUNCTION(Runtime_TypedArraySet) {
Handle<Smi> offset = args.at<Smi>(2);
DCHECK(!target->WasNeutered()); // Checked in TypedArrayPrototypeSet.
+ DCHECK(!obj->IsJSTypedArray()); // Should be handled by CSA.
DCHECK_LE(0, offset->value());
const uint32_t uint_offset = static_cast<uint32_t>(offset->value());
@@ -328,10 +234,6 @@ RUNTIME_FUNCTION(Runtime_TypedArraySet) {
// (Consistent with Firefox and Blink/WebKit)
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kInvalidArgument));
- } else if (obj->IsJSTypedArray()) {
- // The non-overlapping case is handled in CSA.
- Handle<JSTypedArray> source = Handle<JSTypedArray>::cast(obj);
- return TypedArraySetFromOverlapping(isolate, target, source, uint_offset);
}
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, obj,
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index e8aef3fa97..0b002d0ec6 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -16,9 +16,10 @@
#include "src/trap-handler/trap-handler.h"
#include "src/v8memory.h"
#include "src/wasm/module-compiler.h"
-#include "src/wasm/wasm-heap.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-constants.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects.h"
-#include "src/wasm/wasm-opcodes.h"
namespace v8 {
namespace internal {
@@ -33,7 +34,7 @@ WasmInstanceObject* GetWasmInstanceOnStackTop(Isolate* isolate) {
WasmInstanceObject* owning_instance = nullptr;
if (FLAG_wasm_jit_to_native) {
owning_instance = WasmInstanceObject::GetOwningInstance(
- isolate->wasm_code_manager()->LookupCode(pc));
+ isolate->wasm_engine()->code_manager()->LookupCode(pc));
} else {
owning_instance = WasmInstanceObject::GetOwningInstanceGC(
isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code);
@@ -45,14 +46,14 @@ WasmInstanceObject* GetWasmInstanceOnStackTop(Isolate* isolate) {
Context* GetWasmContextOnStackTop(Isolate* isolate) {
return GetWasmInstanceOnStackTop(isolate)
->compiled_module()
- ->ptr_to_native_context();
+ ->native_context();
}
class ClearThreadInWasmScope {
public:
explicit ClearThreadInWasmScope(bool coming_from_wasm)
: coming_from_wasm_(coming_from_wasm) {
- DCHECK_EQ(trap_handler::UseTrapHandler() && coming_from_wasm,
+ DCHECK_EQ(trap_handler::IsTrapHandlerEnabled() && coming_from_wasm,
trap_handler::IsThreadInWasm());
if (coming_from_wasm) trap_handler::ClearThreadInWasm();
}
@@ -79,7 +80,7 @@ RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
// Set the current isolate's context.
DCHECK_NULL(isolate->context());
- isolate->set_context(instance->compiled_module()->ptr_to_native_context());
+ isolate->set_context(instance->compiled_module()->native_context());
return *isolate->factory()->NewNumberFromInt(
WasmInstanceObject::GrowMemory(isolate, instance, delta_pages));
@@ -170,7 +171,7 @@ RUNTIME_FUNCTION(Runtime_WasmGetExceptionRuntimeId) {
}
}
}
- return Smi::FromInt(wasm::WasmModule::kInvalidExceptionTag);
+ return Smi::FromInt(wasm::kInvalidExceptionTag);
}
RUNTIME_FUNCTION(Runtime_WasmExceptionGetElement) {
@@ -248,7 +249,7 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
// Set the current isolate's context.
DCHECK_NULL(isolate->context());
- isolate->set_context(instance->compiled_module()->ptr_to_native_context());
+ isolate->set_context(instance->compiled_module()->native_context());
// Find the frame pointer of the interpreter entry.
Address frame_pointer = 0;
@@ -275,7 +276,8 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
RUNTIME_FUNCTION(Runtime_WasmStackGuard) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
- DCHECK(!trap_handler::UseTrapHandler() || trap_handler::IsThreadInWasm());
+ DCHECK(!trap_handler::IsTrapHandlerEnabled() ||
+ trap_handler::IsThreadInWasm());
ClearThreadInWasmScope wasm_flag(true);
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index da16ee5fc8..487ee675ad 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -120,6 +120,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_COMPILER(F) \
F(CompileLazy, 1, 1) \
F(CompileOptimized_Concurrent, 1, 1) \
+ F(FunctionFirstExecution, 1, 1) \
F(CompileOptimized_NotConcurrent, 1, 1) \
F(EvictOptimizedCodeSlot, 1, 1) \
F(NotifyDeoptimized, 0, 1) \
@@ -338,7 +339,8 @@ namespace internal {
F(Typeof, 1, 1) \
F(UnwindAndFindExceptionHandler, 0, 1) \
F(AllowDynamicFunction, 1, 1) \
- F(GetTemplateObject, 1, 1)
+ F(GetTemplateObject, 1, 1) \
+ F(ReportMessage, 1, 1)
#define FOR_EACH_INTRINSIC_LITERALS(F) \
F(CreateRegExpLiteral, 4, 1) \
@@ -373,7 +375,6 @@ namespace internal {
F(StringToNumber, 1, 1) \
F(StringParseInt, 2, 1) \
F(StringParseFloat, 1, 1) \
- F(NumberToString, 1, 1) \
F(NumberToStringSkipCache, 1, 1) \
F(NumberToSmi, 1, 1) \
F(SmiLexicographicCompare, 2, 1) \
@@ -390,6 +391,10 @@ namespace internal {
F(ObjectCreate, 2, 1) \
F(InternalSetPrototype, 2, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
+ F(ObjectValues, 1, 1) \
+ F(ObjectValuesSkipFastPath, 1, 1) \
+ F(ObjectEntries, 1, 1) \
+ F(ObjectEntriesSkipFastPath, 1, 1) \
F(GetProperty, 2, 1) \
F(KeyedGetProperty, 2, 1) \
F(AddNamedProperty, 4, 1) \
@@ -461,26 +466,23 @@ namespace internal {
F(GreaterThanOrEqual, 2, 1) \
F(InstanceOf, 2, 1)
-#define FOR_EACH_INTRINSIC_PROMISE(F) \
- F(EnqueueMicrotask, 1, 1) \
- F(EnqueuePromiseReactionJob, 1, 1) \
- F(EnqueuePromiseResolveThenableJob, 1, 1) \
- F(PromiseHookInit, 2, 1) \
- F(PromiseHookResolve, 1, 1) \
- F(PromiseHookBefore, 1, 1) \
- F(PromiseHookAfter, 1, 1) \
- F(PromiseMarkAsHandled, 1, 1) \
- F(PromiseRejectEventFromStack, 2, 1) \
- F(PromiseRevokeReject, 1, 1) \
- F(PromiseResult, 1, 1) \
- F(PromiseStatus, 1, 1) \
+#define FOR_EACH_INTRINSIC_PROMISE(F) \
+ F(EnqueueMicrotask, 1, 1) \
+ F(PromiseHookInit, 2, 1) \
+ F(PromiseHookResolve, 1, 1) \
+ F(PromiseHookBefore, 1, 1) \
+ F(PromiseHookAfter, 1, 1) \
+ F(PromiseMarkAsHandled, 1, 1) \
+ F(PromiseRejectEventFromStack, 2, 1) \
+ F(PromiseRevokeReject, 1, 1) \
+ F(PromiseResult, 1, 1) \
+ F(PromiseStatus, 1, 1) \
F(ReportPromiseReject, 2, 1)
#define FOR_EACH_INTRINSIC_PROXY(F) \
F(IsJSProxy, 1, 1) \
F(JSProxyGetTarget, 1, 1) \
F(JSProxyGetHandler, 1, 1) \
- F(JSProxyRevoke, 1, 1) \
F(GetPropertyWithReceiver, 2, 1) \
F(CheckProxyHasTrap, 2, 1) \
F(SetPropertyWithReceiver, 5, 1) \
@@ -629,9 +631,10 @@ namespace internal {
F(HeapObjectVerify, 1, 1) \
F(WasmNumInterpretedCalls, 1, 1) \
F(RedirectToWasmInterpreter, 2, 1) \
- F(WasmTraceMemory, 4, 1) \
+ F(WasmTraceMemory, 1, 1) \
F(CompleteInobjectSlackTracking, 1, 1) \
- F(IsLiftoffFunction, 1, 1)
+ F(IsLiftoffFunction, 1, 1) \
+ F(FreezeWasmLazyCompilation, 1, 1)
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
F(ArrayBufferGetByteLength, 1, 1) \
@@ -681,6 +684,7 @@ namespace internal {
F(LoadIC_Miss, 4, 1) \
F(LoadPropertyWithInterceptor, 5, 1) \
F(StoreCallbackProperty, 6, 1) \
+ F(StoreGlobalIC_Miss, 4, 1) \
F(StoreGlobalIC_Slow, 5, 1) \
F(StoreIC_Miss, 5, 1) \
F(StorePropertyWithInterceptor, 5, 1) \
diff --git a/deps/v8/src/s390/assembler-s390-inl.h b/deps/v8/src/s390/assembler-s390-inl.h
index d8d7ce4256..6323730b99 100644
--- a/deps/v8/src/s390/assembler-s390-inl.h
+++ b/deps/v8/src/s390/assembler-s390-inl.h
@@ -69,9 +69,9 @@ void RelocInfo::apply(intptr_t delta) {
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
- Address target = Assembler::target_address_at(pc_, host_);
- Assembler::set_target_address_at(nullptr, pc_, host_, target + delta,
- SKIP_ICACHE_FLUSH);
+ Address target = Assembler::target_address_at(pc_, constant_pool_);
+ Assembler::set_target_address_at(nullptr, pc_, constant_pool_,
+ target + delta, SKIP_ICACHE_FLUSH);
}
}
@@ -82,7 +82,7 @@ Address RelocInfo::target_internal_reference() {
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
}
@@ -93,7 +93,7 @@ Address RelocInfo::target_internal_reference_address() {
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
@@ -118,18 +118,6 @@ Address RelocInfo::constant_pool_entry_address() {
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
-Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- return target_address_at(pc, constant_pool);
-}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
-}
-
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
@@ -153,15 +141,15 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(
- reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
+ return HeapObject::cast(reinterpret_cast<Object*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
if (rmode_ == EMBEDDED_OBJECT) {
return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
- Assembler::target_address_at(pc_, host_)));
+ Assembler::target_address_at(pc_, constant_pool_)));
} else {
return Handle<HeapObject>::cast(origin->code_target_object_handle_at(pc_));
}
@@ -171,7 +159,7 @@ void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
+ Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -183,7 +171,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
@@ -209,10 +197,10 @@ void RelocInfo::WipeOut(Isolate* isolate) {
} else if (IsInternalReferenceEncoded(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
- Assembler::set_target_address_at(isolate, pc_, host_, nullptr,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr,
SKIP_ICACHE_FLUSH);
} else {
- Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
}
}
@@ -294,14 +282,14 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
// There is a FIXED_SEQUENCE assumption here
void Assembler::deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code, Address target) {
- set_target_address_at(isolate, instruction_payload, code, target);
+ set_target_address_at(isolate, instruction_payload,
+ code ? code->constant_pool() : nullptr, target);
}
void Assembler::deserialization_set_target_internal_reference_at(
Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
- Code* code = nullptr;
- set_target_address_at(isolate, pc, code, target, SKIP_ICACHE_FLUSH);
+ set_target_address_at(isolate, pc, nullptr, target, SKIP_ICACHE_FLUSH);
} else {
Memory::Address_at(pc) = target;
}
diff --git a/deps/v8/src/s390/assembler-s390.cc b/deps/v8/src/s390/assembler-s390.cc
index 70701beb72..166da1c451 100644
--- a/deps/v8/src/s390/assembler-s390.cc
+++ b/deps/v8/src/s390/assembler-s390.cc
@@ -271,22 +271,23 @@ bool RelocInfo::IsCodedSpecially() {
bool RelocInfo::IsInConstantPool() { return false; }
Address RelocInfo::embedded_address() const {
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
uint32_t RelocInfo::embedded_size() const {
- return static_cast<uint32_t>(
- reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
+ return static_cast<uint32_t>(reinterpret_cast<intptr_t>(
+ Assembler::target_address_at(pc_, constant_pool_)));
}
void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
+ flush_mode);
}
void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, host_,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
@@ -441,7 +442,7 @@ int Assembler::target_at(int pos) {
} else if (LLILF == opcode || BRCL == opcode || LARL == opcode ||
BRASL == opcode) {
int32_t imm32 =
- static_cast<int32_t>(instr & (static_cast<uint64_t>(0xffffffff)));
+ static_cast<int32_t>(instr & (static_cast<uint64_t>(0xFFFFFFFF)));
if (LLILF != opcode)
imm32 <<= 1; // BR* + LARL treat immediate in # of halfwords
if (imm32 == 0) return kEndOfChain;
@@ -465,14 +466,14 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
if (BRC == opcode || BRCT == opcode || BRCTG == opcode) {
int16_t imm16 = target_pos - pos;
- instr &= (~0xffff);
+ instr &= (~0xFFFF);
DCHECK(is_int16(imm16));
instr_at_put<FourByteInstr>(pos, instr | (imm16 >> 1));
return;
} else if (BRCL == opcode || LARL == opcode || BRASL == opcode) {
// Immediate is in # of halfwords
int32_t imm32 = target_pos - pos;
- instr &= (~static_cast<uint64_t>(0xffffffff));
+ instr &= (~static_cast<uint64_t>(0xFFFFFFFF));
instr_at_put<SixByteInstr>(pos, instr | (imm32 >> 1));
return;
} else if (LLILF == opcode) {
@@ -480,7 +481,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
// Emitted label constant, not part of a branch.
// Make label relative to Code* of generated Code object.
int32_t imm32 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
- instr &= (~static_cast<uint64_t>(0xffffffff));
+ instr &= (~static_cast<uint64_t>(0xFFFFFFFF));
instr_at_put<SixByteInstr>(pos, instr | imm32);
return;
}
@@ -1491,8 +1492,8 @@ void Assembler::ark(Register r1, Register r2, Register r3) {
void Assembler::asi(const MemOperand& opnd, const Operand& imm) {
DCHECK(is_int8(imm.immediate()));
DCHECK(is_int20(opnd.offset()));
- siy_form(ASI, Operand(0xff & imm.immediate()), opnd.rb(),
- 0xfffff & opnd.offset());
+ siy_form(ASI, Operand(0xFF & imm.immediate()), opnd.rb(),
+ 0xFFFFF & opnd.offset());
}
// -----------------------
@@ -1515,8 +1516,8 @@ void Assembler::agrk(Register r1, Register r2, Register r3) {
void Assembler::agsi(const MemOperand& opnd, const Operand& imm) {
DCHECK(is_int8(imm.immediate()));
DCHECK(is_int20(opnd.offset()));
- siy_form(AGSI, Operand(0xff & imm.immediate()), opnd.rb(),
- 0xfffff & opnd.offset());
+ siy_form(AGSI, Operand(0xFF & imm.immediate()), opnd.rb(),
+ 0xFFFFF & opnd.offset());
}
// -------------------------------
@@ -2091,9 +2092,9 @@ void Assembler::fidbra(DoubleRegister d1, DoubleRegister d2, FIDBRA_MASK3 m3) {
bool Assembler::IsNop(SixByteInstr instr, int type) {
DCHECK((0 == type) || (DEBUG_BREAK_NOP == type));
if (DEBUG_BREAK_NOP == type) {
- return ((instr & 0xffffffff) == 0xa53b0000); // oill r3, 0
+ return ((instr & 0xFFFFFFFF) == 0xA53B0000); // oill r3, 0
}
- return ((instr & 0xffff) == 0x1800); // lr r0,r0
+ return ((instr & 0xFFFF) == 0x1800); // lr r0,r0
}
// dummy instruction reserved for special use.
@@ -2213,8 +2214,7 @@ void Assembler::EmitRelocations() {
it != relocations_.end(); it++) {
RelocInfo::Mode rmode = it->rmode();
Address pc = buffer_ + it->position();
- Code* code = nullptr;
- RelocInfo rinfo(pc, rmode, it->data(), code);
+ RelocInfo rinfo(pc, rmode, it->data(), nullptr);
// Fix up internal references now that they are guaranteed to be bound.
if (RelocInfo::IsInternalReference(rmode)) {
@@ -2223,8 +2223,8 @@ void Assembler::EmitRelocations() {
Memory::Address_at(pc) = buffer_ + pos;
} else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
// mov sequence
- intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, code));
- set_target_address_at(nullptr, pc, code, buffer_ + pos,
+ intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, nullptr));
+ set_target_address_at(nullptr, pc, nullptr, buffer_ + pos,
SKIP_ICACHE_FLUSH);
}
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/s390/assembler-s390.h
index e9863197a7..4a5945de87 100644
--- a/deps/v8/src/s390/assembler-s390.h
+++ b/deps/v8/src/s390/assembler-s390.h
@@ -276,6 +276,7 @@ constexpr Register kLithiumScratch = r1; // lithium scratch.
constexpr Register kRootRegister = r10; // Roots array pointer.
constexpr Register cp = r13; // JavaScript context pointer.
+constexpr bool kPadArguments = false;
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
@@ -556,10 +557,6 @@ class Assembler : public AssemblerBase {
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
- INLINE(static Address target_address_at(Address pc, Code* code));
- INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
index d33d09c657..783b995c72 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -122,8 +122,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// scratch_high LSR 31 equals zero.
// New result = (result eor 0) + 0 = result.
// If the input was negative, we have to negate the result.
- // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
- // New result = (result eor 0xffffffff) + 1 = 0 - result.
+ // Input_high ASR 31 equals 0xFFFFFFFF and scratch_high LSR 31 equals 1.
+ // New result = (result eor 0xFFFFFFFF) + 1 = 0 - result.
__ ShiftRightArith(r0, scratch_high, Operand(31));
#if V8_TARGET_ARCH_S390X
__ lgfr(r0, r0);
@@ -495,6 +495,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// kCEntryFPAddress
// Frame type
__ lay(sp, MemOperand(sp, -5 * kPointerSize));
+
// Push a bad frame pointer to fail if it is used.
__ LoadImmP(r10, Operand(-1));
@@ -512,6 +513,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ lay(fp,
MemOperand(sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize));
+ __ InitializeRootRegister();
+
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
ExternalReference js_entry_sp(IsolateAddressId::kJSEntrySPAddress, isolate());
@@ -564,12 +567,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// r4: receiver
// r5: argc
// r6: argv
- if (type() == StackFrame::CONSTRUCT_ENTRY) {
- __ Call(BUILTIN_CODE(isolate(), JSConstructEntryTrampoline),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(BUILTIN_CODE(isolate(), JSEntryTrampoline), RelocInfo::CODE_TARGET);
- }
+ __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@@ -783,7 +781,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -822,7 +820,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
if (FLAG_debug_code) {
__ LoadP(r7, FieldMemOperand(r4, 0));
__ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, kExpectedAllocationSite);
+ __ Assert(eq, AbortReason::kExpectedAllocationSite);
}
// Save the resulting elements kind in type info. We can't just store r5
@@ -846,7 +844,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -917,9 +915,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ TestIfSmi(r6);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r6, r6, r7, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
// We should either have undefined in r4 or a valid AllocationSite
__ AssertUndefinedOrAllocationSite(r4, r6);
@@ -996,9 +994,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ TestIfSmi(r5);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+ __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r5, r5, r6, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
// Figure out the right elements kind
@@ -1013,7 +1011,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CmpP(r5, Operand(PACKED_ELEMENTS));
__ beq(&done);
__ CmpP(r5, Operand(HOLEY_ELEMENTS));
- __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ Assert(
+ eq,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
@@ -1118,7 +1118,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
if (__ emit_debug_code()) {
__ LoadlW(r3, MemOperand(r9, kLevelOffset));
__ CmpP(r3, r8);
- __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
}
__ SubP(r8, Operand(1));
__ StoreW(r8, MemOperand(r9, kLevelOffset));
diff --git a/deps/v8/src/s390/codegen-s390.cc b/deps/v8/src/s390/codegen-s390.cc
index e6c627da3a..df02570783 100644
--- a/deps/v8/src/s390/codegen-s390.cc
+++ b/deps/v8/src/s390/codegen-s390.cc
@@ -20,8 +20,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -38,8 +37,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
diff --git a/deps/v8/src/s390/constants-s390.cc b/deps/v8/src/s390/constants-s390.cc
index da53613bc7..bda7f61cf4 100644
--- a/deps/v8/src/s390/constants-s390.cc
+++ b/deps/v8/src/s390/constants-s390.cc
@@ -22,12 +22,12 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x07
ONE_BYTE_OPCODE, // 0x08
ONE_BYTE_OPCODE, // 0x09
- ONE_BYTE_OPCODE, // 0x0a
- ONE_BYTE_OPCODE, // 0x0b
- ONE_BYTE_OPCODE, // 0x0c
- ONE_BYTE_OPCODE, // 0x0d
- ONE_BYTE_OPCODE, // 0x0e
- ONE_BYTE_OPCODE, // 0x0f
+ ONE_BYTE_OPCODE, // 0x0A
+ ONE_BYTE_OPCODE, // 0x0B
+ ONE_BYTE_OPCODE, // 0x0C
+ ONE_BYTE_OPCODE, // 0x0D
+ ONE_BYTE_OPCODE, // 0x0E
+ ONE_BYTE_OPCODE, // 0x0F
ONE_BYTE_OPCODE, // 0x10
ONE_BYTE_OPCODE, // 0x11
ONE_BYTE_OPCODE, // 0x12
@@ -38,12 +38,12 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x17
ONE_BYTE_OPCODE, // 0x18
ONE_BYTE_OPCODE, // 0x19
- ONE_BYTE_OPCODE, // 0x1a
- ONE_BYTE_OPCODE, // 0x1b
- ONE_BYTE_OPCODE, // 0x1c
- ONE_BYTE_OPCODE, // 0x1d
- ONE_BYTE_OPCODE, // 0x1e
- ONE_BYTE_OPCODE, // 0x1f
+ ONE_BYTE_OPCODE, // 0x1A
+ ONE_BYTE_OPCODE, // 0x1B
+ ONE_BYTE_OPCODE, // 0x1C
+ ONE_BYTE_OPCODE, // 0x1D
+ ONE_BYTE_OPCODE, // 0x1E
+ ONE_BYTE_OPCODE, // 0x1F
ONE_BYTE_OPCODE, // 0x20
ONE_BYTE_OPCODE, // 0x21
ONE_BYTE_OPCODE, // 0x22
@@ -54,12 +54,12 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x27
ONE_BYTE_OPCODE, // 0x28
ONE_BYTE_OPCODE, // 0x29
- ONE_BYTE_OPCODE, // 0x2a
- ONE_BYTE_OPCODE, // 0x2b
- ONE_BYTE_OPCODE, // 0x2c
- ONE_BYTE_OPCODE, // 0x2d
- ONE_BYTE_OPCODE, // 0x2e
- ONE_BYTE_OPCODE, // 0x2f
+ ONE_BYTE_OPCODE, // 0x2A
+ ONE_BYTE_OPCODE, // 0x2B
+ ONE_BYTE_OPCODE, // 0x2C
+ ONE_BYTE_OPCODE, // 0x2D
+ ONE_BYTE_OPCODE, // 0x2E
+ ONE_BYTE_OPCODE, // 0x2F
ONE_BYTE_OPCODE, // 0x30
ONE_BYTE_OPCODE, // 0x31
ONE_BYTE_OPCODE, // 0x32
@@ -70,12 +70,12 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x37
ONE_BYTE_OPCODE, // 0x38
ONE_BYTE_OPCODE, // 0x39
- ONE_BYTE_OPCODE, // 0x3a
- ONE_BYTE_OPCODE, // 0x3b
- ONE_BYTE_OPCODE, // 0x3c
- ONE_BYTE_OPCODE, // 0x3d
- ONE_BYTE_OPCODE, // 0x3e
- ONE_BYTE_OPCODE, // 0x3f
+ ONE_BYTE_OPCODE, // 0x3A
+ ONE_BYTE_OPCODE, // 0x3B
+ ONE_BYTE_OPCODE, // 0x3C
+ ONE_BYTE_OPCODE, // 0x3D
+ ONE_BYTE_OPCODE, // 0x3E
+ ONE_BYTE_OPCODE, // 0x3F
ONE_BYTE_OPCODE, // 0x40
ONE_BYTE_OPCODE, // 0x41
ONE_BYTE_OPCODE, // 0x42
@@ -86,12 +86,12 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x47
ONE_BYTE_OPCODE, // 0x48
ONE_BYTE_OPCODE, // 0x49
- ONE_BYTE_OPCODE, // 0x4a
- ONE_BYTE_OPCODE, // 0x4b
- ONE_BYTE_OPCODE, // 0x4c
- ONE_BYTE_OPCODE, // 0x4d
- ONE_BYTE_OPCODE, // 0x4e
- ONE_BYTE_OPCODE, // 0x4f
+ ONE_BYTE_OPCODE, // 0x4A
+ ONE_BYTE_OPCODE, // 0x4B
+ ONE_BYTE_OPCODE, // 0x4C
+ ONE_BYTE_OPCODE, // 0x4D
+ ONE_BYTE_OPCODE, // 0x4E
+ ONE_BYTE_OPCODE, // 0x4F
ONE_BYTE_OPCODE, // 0x50
ONE_BYTE_OPCODE, // 0x51
ONE_BYTE_OPCODE, // 0x52
@@ -102,12 +102,12 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x57
ONE_BYTE_OPCODE, // 0x58
ONE_BYTE_OPCODE, // 0x59
- ONE_BYTE_OPCODE, // 0x5a
- ONE_BYTE_OPCODE, // 0x5b
- ONE_BYTE_OPCODE, // 0x5c
- ONE_BYTE_OPCODE, // 0x5d
- ONE_BYTE_OPCODE, // 0x5e
- ONE_BYTE_OPCODE, // 0x5f
+ ONE_BYTE_OPCODE, // 0x5A
+ ONE_BYTE_OPCODE, // 0x5B
+ ONE_BYTE_OPCODE, // 0x5C
+ ONE_BYTE_OPCODE, // 0x5D
+ ONE_BYTE_OPCODE, // 0x5E
+ ONE_BYTE_OPCODE, // 0x5F
ONE_BYTE_OPCODE, // 0x60
ONE_BYTE_OPCODE, // 0x61
ONE_BYTE_OPCODE, // 0x62
@@ -118,12 +118,12 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x67
ONE_BYTE_OPCODE, // 0x68
ONE_BYTE_OPCODE, // 0x69
- ONE_BYTE_OPCODE, // 0x6a
- ONE_BYTE_OPCODE, // 0x6b
- ONE_BYTE_OPCODE, // 0x6c
- ONE_BYTE_OPCODE, // 0x6d
- ONE_BYTE_OPCODE, // 0x6e
- ONE_BYTE_OPCODE, // 0x6f
+ ONE_BYTE_OPCODE, // 0x6A
+ ONE_BYTE_OPCODE, // 0x6B
+ ONE_BYTE_OPCODE, // 0x6C
+ ONE_BYTE_OPCODE, // 0x6D
+ ONE_BYTE_OPCODE, // 0x6E
+ ONE_BYTE_OPCODE, // 0x6F
ONE_BYTE_OPCODE, // 0x70
ONE_BYTE_OPCODE, // 0x71
ONE_BYTE_OPCODE, // 0x72
@@ -134,12 +134,12 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x77
ONE_BYTE_OPCODE, // 0x78
ONE_BYTE_OPCODE, // 0x79
- ONE_BYTE_OPCODE, // 0x7a
- ONE_BYTE_OPCODE, // 0x7b
- ONE_BYTE_OPCODE, // 0x7c
- ONE_BYTE_OPCODE, // 0x7d
- ONE_BYTE_OPCODE, // 0x7e
- ONE_BYTE_OPCODE, // 0x7f
+ ONE_BYTE_OPCODE, // 0x7A
+ ONE_BYTE_OPCODE, // 0x7B
+ ONE_BYTE_OPCODE, // 0x7C
+ ONE_BYTE_OPCODE, // 0x7D
+ ONE_BYTE_OPCODE, // 0x7E
+ ONE_BYTE_OPCODE, // 0x7F
ONE_BYTE_OPCODE, // 0x80
ONE_BYTE_OPCODE, // 0x81
ONE_BYTE_OPCODE, // 0x82
@@ -150,12 +150,12 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x87
ONE_BYTE_OPCODE, // 0x88
ONE_BYTE_OPCODE, // 0x89
- ONE_BYTE_OPCODE, // 0x8a
- ONE_BYTE_OPCODE, // 0x8b
- ONE_BYTE_OPCODE, // 0x8c
- ONE_BYTE_OPCODE, // 0x8d
- ONE_BYTE_OPCODE, // 0x8e
- ONE_BYTE_OPCODE, // 0x8f
+ ONE_BYTE_OPCODE, // 0x8A
+ ONE_BYTE_OPCODE, // 0x8B
+ ONE_BYTE_OPCODE, // 0x8C
+ ONE_BYTE_OPCODE, // 0x8D
+ ONE_BYTE_OPCODE, // 0x8E
+ ONE_BYTE_OPCODE, // 0x8F
ONE_BYTE_OPCODE, // 0x90
ONE_BYTE_OPCODE, // 0x91
ONE_BYTE_OPCODE, // 0x92
@@ -166,108 +166,108 @@ Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
ONE_BYTE_OPCODE, // 0x97
ONE_BYTE_OPCODE, // 0x98
ONE_BYTE_OPCODE, // 0x99
- ONE_BYTE_OPCODE, // 0x9a
- ONE_BYTE_OPCODE, // 0x9b
- TWO_BYTE_DISJOINT_OPCODE, // 0x9c
- TWO_BYTE_DISJOINT_OPCODE, // 0x9d
- TWO_BYTE_DISJOINT_OPCODE, // 0x9e
- TWO_BYTE_DISJOINT_OPCODE, // 0x9f
- TWO_BYTE_DISJOINT_OPCODE, // 0xa0
- TWO_BYTE_DISJOINT_OPCODE, // 0xa1
- TWO_BYTE_DISJOINT_OPCODE, // 0xa2
- TWO_BYTE_DISJOINT_OPCODE, // 0xa3
- TWO_BYTE_DISJOINT_OPCODE, // 0xa4
- THREE_NIBBLE_OPCODE, // 0xa5
- TWO_BYTE_DISJOINT_OPCODE, // 0xa6
- THREE_NIBBLE_OPCODE, // 0xa7
- ONE_BYTE_OPCODE, // 0xa8
- ONE_BYTE_OPCODE, // 0xa9
- ONE_BYTE_OPCODE, // 0xaa
- ONE_BYTE_OPCODE, // 0xab
- ONE_BYTE_OPCODE, // 0xac
- ONE_BYTE_OPCODE, // 0xad
- ONE_BYTE_OPCODE, // 0xae
- ONE_BYTE_OPCODE, // 0xaf
- ONE_BYTE_OPCODE, // 0xb0
- ONE_BYTE_OPCODE, // 0xb1
- TWO_BYTE_OPCODE, // 0xb2
- TWO_BYTE_OPCODE, // 0xb3
- TWO_BYTE_DISJOINT_OPCODE, // 0xb4
- TWO_BYTE_DISJOINT_OPCODE, // 0xb5
- TWO_BYTE_DISJOINT_OPCODE, // 0xb6
- TWO_BYTE_DISJOINT_OPCODE, // 0xb7
- TWO_BYTE_DISJOINT_OPCODE, // 0xb8
- TWO_BYTE_OPCODE, // 0xb9
- ONE_BYTE_OPCODE, // 0xba
- ONE_BYTE_OPCODE, // 0xbb
- ONE_BYTE_OPCODE, // 0xbc
- ONE_BYTE_OPCODE, // 0xbd
- ONE_BYTE_OPCODE, // 0xbe
- ONE_BYTE_OPCODE, // 0xbf
- THREE_NIBBLE_OPCODE, // 0xc0
- THREE_NIBBLE_OPCODE, // 0xc1
- THREE_NIBBLE_OPCODE, // 0xc2
- THREE_NIBBLE_OPCODE, // 0xc3
- THREE_NIBBLE_OPCODE, // 0xc4
- THREE_NIBBLE_OPCODE, // 0xc5
- THREE_NIBBLE_OPCODE, // 0xc6
- ONE_BYTE_OPCODE, // 0xc7
- THREE_NIBBLE_OPCODE, // 0xc8
- THREE_NIBBLE_OPCODE, // 0xc9
- THREE_NIBBLE_OPCODE, // 0xca
- THREE_NIBBLE_OPCODE, // 0xcb
- THREE_NIBBLE_OPCODE, // 0xcc
- TWO_BYTE_DISJOINT_OPCODE, // 0xcd
- TWO_BYTE_DISJOINT_OPCODE, // 0xce
- TWO_BYTE_DISJOINT_OPCODE, // 0xcf
- ONE_BYTE_OPCODE, // 0xd0
- ONE_BYTE_OPCODE, // 0xd1
- ONE_BYTE_OPCODE, // 0xd2
- ONE_BYTE_OPCODE, // 0xd3
- ONE_BYTE_OPCODE, // 0xd4
- ONE_BYTE_OPCODE, // 0xd5
- ONE_BYTE_OPCODE, // 0xd6
- ONE_BYTE_OPCODE, // 0xd7
- ONE_BYTE_OPCODE, // 0xd8
- ONE_BYTE_OPCODE, // 0xd9
- ONE_BYTE_OPCODE, // 0xda
- ONE_BYTE_OPCODE, // 0xdb
- ONE_BYTE_OPCODE, // 0xdc
- ONE_BYTE_OPCODE, // 0xdd
- ONE_BYTE_OPCODE, // 0xde
- ONE_BYTE_OPCODE, // 0xdf
- ONE_BYTE_OPCODE, // 0xe0
- ONE_BYTE_OPCODE, // 0xe1
- ONE_BYTE_OPCODE, // 0xe2
- TWO_BYTE_DISJOINT_OPCODE, // 0xe3
- TWO_BYTE_DISJOINT_OPCODE, // 0xe4
- TWO_BYTE_OPCODE, // 0xe5
- TWO_BYTE_DISJOINT_OPCODE, // 0xe6
- TWO_BYTE_DISJOINT_OPCODE, // 0xe7
- ONE_BYTE_OPCODE, // 0xe8
- ONE_BYTE_OPCODE, // 0xe9
- ONE_BYTE_OPCODE, // 0xea
- TWO_BYTE_DISJOINT_OPCODE, // 0xeb
- TWO_BYTE_DISJOINT_OPCODE, // 0xec
- TWO_BYTE_DISJOINT_OPCODE, // 0xed
- ONE_BYTE_OPCODE, // 0xee
- ONE_BYTE_OPCODE, // 0xef
- ONE_BYTE_OPCODE, // 0xf0
- ONE_BYTE_OPCODE, // 0xf1
- ONE_BYTE_OPCODE, // 0xf2
- ONE_BYTE_OPCODE, // 0xf3
- ONE_BYTE_OPCODE, // 0xf4
- ONE_BYTE_OPCODE, // 0xf5
- ONE_BYTE_OPCODE, // 0xf6
- ONE_BYTE_OPCODE, // 0xf7
- ONE_BYTE_OPCODE, // 0xf8
- ONE_BYTE_OPCODE, // 0xf9
- ONE_BYTE_OPCODE, // 0xfa
- ONE_BYTE_OPCODE, // 0xfb
- ONE_BYTE_OPCODE, // 0xfc
- ONE_BYTE_OPCODE, // 0xfd
- TWO_BYTE_DISJOINT_OPCODE, // 0xfe
- TWO_BYTE_DISJOINT_OPCODE, // 0xff
+ ONE_BYTE_OPCODE, // 0x9A
+ ONE_BYTE_OPCODE, // 0x9B
+ TWO_BYTE_DISJOINT_OPCODE, // 0x9C
+ TWO_BYTE_DISJOINT_OPCODE, // 0x9D
+ TWO_BYTE_DISJOINT_OPCODE, // 0x9E
+ TWO_BYTE_DISJOINT_OPCODE, // 0x9F
+ TWO_BYTE_DISJOINT_OPCODE, // 0xA0
+ TWO_BYTE_DISJOINT_OPCODE, // 0xA1
+ TWO_BYTE_DISJOINT_OPCODE, // 0xA2
+ TWO_BYTE_DISJOINT_OPCODE, // 0xA3
+ TWO_BYTE_DISJOINT_OPCODE, // 0xA4
+ THREE_NIBBLE_OPCODE, // 0xA5
+ TWO_BYTE_DISJOINT_OPCODE, // 0xA6
+ THREE_NIBBLE_OPCODE, // 0xA7
+ ONE_BYTE_OPCODE, // 0xA8
+ ONE_BYTE_OPCODE, // 0xA9
+ ONE_BYTE_OPCODE, // 0xAA
+ ONE_BYTE_OPCODE, // 0xAB
+ ONE_BYTE_OPCODE, // 0xAC
+ ONE_BYTE_OPCODE, // 0xAD
+ ONE_BYTE_OPCODE, // 0xAE
+ ONE_BYTE_OPCODE, // 0xAF
+ ONE_BYTE_OPCODE, // 0xB0
+ ONE_BYTE_OPCODE, // 0xB1
+ TWO_BYTE_OPCODE, // 0xB2
+ TWO_BYTE_OPCODE, // 0xB3
+ TWO_BYTE_DISJOINT_OPCODE, // 0xB4
+ TWO_BYTE_DISJOINT_OPCODE, // 0xB5
+ TWO_BYTE_DISJOINT_OPCODE, // 0xB6
+ TWO_BYTE_DISJOINT_OPCODE, // 0xB7
+ TWO_BYTE_DISJOINT_OPCODE, // 0xB8
+ TWO_BYTE_OPCODE, // 0xB9
+ ONE_BYTE_OPCODE, // 0xBA
+ ONE_BYTE_OPCODE, // 0xBB
+ ONE_BYTE_OPCODE, // 0xBC
+ ONE_BYTE_OPCODE, // 0xBD
+ ONE_BYTE_OPCODE, // 0xBE
+ ONE_BYTE_OPCODE, // 0xBF
+ THREE_NIBBLE_OPCODE, // 0xC0
+ THREE_NIBBLE_OPCODE, // 0xC1
+ THREE_NIBBLE_OPCODE, // 0xC2
+ THREE_NIBBLE_OPCODE, // 0xC3
+ THREE_NIBBLE_OPCODE, // 0xC4
+ THREE_NIBBLE_OPCODE, // 0xC5
+ THREE_NIBBLE_OPCODE, // 0xC6
+ ONE_BYTE_OPCODE, // 0xC7
+ THREE_NIBBLE_OPCODE, // 0xC8
+ THREE_NIBBLE_OPCODE, // 0xC9
+ THREE_NIBBLE_OPCODE, // 0xCA
+ THREE_NIBBLE_OPCODE, // 0xCB
+ THREE_NIBBLE_OPCODE, // 0xCC
+ TWO_BYTE_DISJOINT_OPCODE, // 0xCD
+ TWO_BYTE_DISJOINT_OPCODE, // 0xCE
+ TWO_BYTE_DISJOINT_OPCODE, // 0xCF
+ ONE_BYTE_OPCODE, // 0xD0
+ ONE_BYTE_OPCODE, // 0xD1
+ ONE_BYTE_OPCODE, // 0xD2
+ ONE_BYTE_OPCODE, // 0xD3
+ ONE_BYTE_OPCODE, // 0xD4
+ ONE_BYTE_OPCODE, // 0xD5
+ ONE_BYTE_OPCODE, // 0xD6
+ ONE_BYTE_OPCODE, // 0xD7
+ ONE_BYTE_OPCODE, // 0xD8
+ ONE_BYTE_OPCODE, // 0xD9
+ ONE_BYTE_OPCODE, // 0xDA
+ ONE_BYTE_OPCODE, // 0xDB
+ ONE_BYTE_OPCODE, // 0xDC
+ ONE_BYTE_OPCODE, // 0xDD
+ ONE_BYTE_OPCODE, // 0xDE
+ ONE_BYTE_OPCODE, // 0xDF
+ ONE_BYTE_OPCODE, // 0xE0
+ ONE_BYTE_OPCODE, // 0xE1
+ ONE_BYTE_OPCODE, // 0xE2
+ TWO_BYTE_DISJOINT_OPCODE, // 0xE3
+ TWO_BYTE_DISJOINT_OPCODE, // 0xE4
+ TWO_BYTE_OPCODE, // 0xE5
+ TWO_BYTE_DISJOINT_OPCODE, // 0xE6
+ TWO_BYTE_DISJOINT_OPCODE, // 0xE7
+ ONE_BYTE_OPCODE, // 0xE8
+ ONE_BYTE_OPCODE, // 0xE9
+ ONE_BYTE_OPCODE, // 0xEA
+ TWO_BYTE_DISJOINT_OPCODE, // 0xEB
+ TWO_BYTE_DISJOINT_OPCODE, // 0xEC
+ TWO_BYTE_DISJOINT_OPCODE, // 0xED
+ ONE_BYTE_OPCODE, // 0xEE
+ ONE_BYTE_OPCODE, // 0xEF
+ ONE_BYTE_OPCODE, // 0xF0
+ ONE_BYTE_OPCODE, // 0xF1
+ ONE_BYTE_OPCODE, // 0xF2
+ ONE_BYTE_OPCODE, // 0xF3
+ ONE_BYTE_OPCODE, // 0xF4
+ ONE_BYTE_OPCODE, // 0xF5
+ ONE_BYTE_OPCODE, // 0xF6
+ ONE_BYTE_OPCODE, // 0xF7
+ ONE_BYTE_OPCODE, // 0xF8
+ ONE_BYTE_OPCODE, // 0xF9
+ ONE_BYTE_OPCODE, // 0xFA
+ ONE_BYTE_OPCODE, // 0xFB
+ ONE_BYTE_OPCODE, // 0xFC
+ ONE_BYTE_OPCODE, // 0xFD
+ TWO_BYTE_DISJOINT_OPCODE, // 0xFE
+ TWO_BYTE_DISJOINT_OPCODE, // 0xFF
};
// These register names are defined in a way to match the native disassembler
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/s390/interface-descriptors-s390.cc
index a8eb807131..3cb4f2e375 100644
--- a/deps/v8/src/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/s390/interface-descriptors-s390.cc
@@ -43,8 +43,6 @@ const Register LoadDescriptor::SlotRegister() { return r2; }
const Register LoadWithVectorDescriptor::VectorRegister() { return r5; }
-const Register LoadICProtoArrayDescriptor::HandlerRegister() { return r6; }
-
const Register StoreDescriptor::ReceiverRegister() { return r3; }
const Register StoreDescriptor::NameRegister() { return r4; }
const Register StoreDescriptor::ValueRegister() { return r2; }
@@ -198,6 +196,12 @@ void TransitionElementsKindDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void AbortJSDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
data->InitializePlatformSpecific(0, nullptr, nullptr);
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index 44f1ba5abb..fe24884378 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -443,7 +443,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
DCHECK(object != value);
if (emit_debug_code()) {
CmpP(value, MemOperand(address));
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
if (remembered_set_action == OMIT_REMEMBERED_SET &&
@@ -1057,9 +1057,10 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
LoadP(cp, MemOperand(ip));
#ifdef DEBUG
+ mov(r1, Operand(Context::kInvalidContext));
mov(ip,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
- StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0);
+ StoreP(r1, MemOperand(ip));
#endif
// Tear down the exit frame, pop the arguments, and return.
@@ -1115,7 +1116,7 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
if (FLAG_debug_code) {
CmpLogicalP(src_reg, dst_reg);
- Check(lt, kStackAccessBelowStackPointer);
+ Check(lt, AbortReason::kStackAccessBelowStackPointer);
}
// Restore caller's frame pointer and return address now as they will be
@@ -1352,7 +1353,7 @@ void MacroAssembler::MaybeDropFrames() {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
// Link the current handler as the next handler.
@@ -1362,6 +1363,10 @@ void MacroAssembler::PushStackHandler() {
// Buy the full stack frame for 5 slots.
lay(sp, MemOperand(sp, -StackHandlerConstants::kSize));
+ // Store padding.
+ mov(r0, Operand(Smi::kZero));
+ StoreP(r0, MemOperand(sp)); // Padding.
+
// Copy the old handler into the next handler slot.
mvc(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7),
kPointerSize);
@@ -1370,15 +1375,16 @@ void MacroAssembler::PushStackHandler() {
}
void MacroAssembler::PopStackHandler() {
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
// Pop the Next Handler into r3 and store it into Handler Address reference.
Pop(r3);
mov(ip,
Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
-
StoreP(r3, MemOperand(ip));
+
+ Drop(1); // Drop padding.
}
void MacroAssembler::CompareObjectType(Register object, Register map,
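Note on the two hunks above: the handler frame grows from one pointer slot to two, with
the extra slot written as Smi::kZero purely as padding and dropped again in
PopStackHandler. The likely motivation (an assumption, not stated in this patch) is to
keep the shared StackHandlerConstants layout uniform across ports, including those that
must keep the stack pointer aligned to a multiple of two pointers.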
@@ -1392,7 +1398,7 @@ void MacroAssembler::CompareObjectType(Register object, Register map,
void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
InstanceType type) {
STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
- STATIC_ASSERT(LAST_TYPE <= 0xffff);
+ STATIC_ASSERT(LAST_TYPE <= 0xFFFF);
LoadHalfWordP(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
CmpP(type_reg, Operand(type));
}
@@ -1547,12 +1553,11 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
}
-void TurboAssembler::Assert(Condition cond, BailoutReason reason,
- CRegister cr) {
+void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
if (emit_debug_code()) Check(cond, reason, cr);
}
-void TurboAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
+void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
Label L;
b(cond, &L);
Abort(reason);
@@ -1560,11 +1565,11 @@ void TurboAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
bind(&L);
}
-void TurboAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
+ const char* msg = GetAbortReason(reason);
if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -1617,7 +1622,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
- Check(ne, kOperandIsASmi, cr0);
+ Check(ne, AbortReason::kOperandIsASmi, cr0);
}
}
@@ -1625,7 +1630,7 @@ void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
- Check(eq, kOperandIsNotSmi, cr0);
+ Check(eq, AbortReason::kOperandIsNotASmi, cr0);
}
}
@@ -1633,11 +1638,11 @@ void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
- Check(ne, kOperandIsASmiAndNotAFixedArray, cr0);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray, cr0);
push(object);
CompareObjectType(object, object, object, FIXED_ARRAY_TYPE);
pop(object);
- Check(eq, kOperandIsNotAFixedArray);
+ Check(eq, AbortReason::kOperandIsNotAFixedArray);
}
}
@@ -1645,11 +1650,11 @@ void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
- Check(ne, kOperandIsASmiAndNotAFunction, cr0);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
push(object);
CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
pop(object);
- Check(eq, kOperandIsNotAFunction);
+ Check(eq, AbortReason::kOperandIsNotAFunction);
}
}
@@ -1657,18 +1662,18 @@ void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
- Check(ne, kOperandIsASmiAndNotABoundFunction, cr0);
+ Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
push(object);
CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
pop(object);
- Check(eq, kOperandIsNotABoundFunction);
+ Check(eq, AbortReason::kOperandIsNotABoundFunction);
}
}
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
TestIfSmi(object);
- Check(ne, kOperandIsASmiAndNotAGeneratorObject, cr0);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
// Load map
Register map = object;
@@ -1687,7 +1692,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
bind(&do_check);
// Restore generator object to register and perform assertion
pop(object);
- Check(eq, kOperandIsNotAGeneratorObject);
+ Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
@@ -1699,7 +1704,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
beq(&done_checking, Label::kNear);
LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
- Assert(eq, kExpectedUndefinedOrCell);
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
}
@@ -3301,7 +3306,7 @@ void TurboAssembler::LoadIntLiteral(Register dst, int value) {
void TurboAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
intptr_t value = reinterpret_cast<intptr_t>(smi);
#if V8_TARGET_ARCH_S390X
- DCHECK_EQ(value & 0xffffffff, 0);
+ DCHECK_EQ(value & 0xFFFFFFFF, 0);
// The smi value is loaded in upper 32-bits. Lower 32-bit are zeros.
llihf(dst, Operand(value >> 32));
#else
@@ -3402,7 +3407,7 @@ void TurboAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
void TurboAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
if (dst != src) LoadRR(dst, src);
#if V8_TARGET_ARCH_S390X
- DCHECK_EQ(reinterpret_cast<intptr_t>(smi) & 0xffffffff, 0);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(smi) & 0xFFFFFFFF, 0);
int value = static_cast<int>(reinterpret_cast<intptr_t>(smi) >> 32);
nihf(dst, Operand(value));
#else
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/s390/macro-assembler-s390.h
index 4076c171ad..fcc62f21a9 100644
--- a/deps/v8/src/s390/macro-assembler-s390.h
+++ b/deps/v8/src/s390/macro-assembler-s390.h
@@ -873,13 +873,13 @@ class TurboAssembler : public Assembler {
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
+ void Assert(Condition cond, AbortReason reason, CRegister cr = cr7);
// Like Assert(), but always enabled.
- void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
+ void Check(Condition cond, AbortReason reason, CRegister cr = cr7);
// Print a message to stdout and abort execution.
- void Abort(BailoutReason reason);
+ void Abort(AbortReason reason);
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
diff --git a/deps/v8/src/s390/simulator-s390.cc b/deps/v8/src/s390/simulator-s390.cc
index a130f359f0..f6754bdd4b 100644
--- a/deps/v8/src/s390/simulator-s390.cc
+++ b/deps/v8/src/s390/simulator-s390.cc
@@ -227,7 +227,7 @@ void S390Debugger::Debug() {
// If at a breakpoint, proceed past it.
if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
- ->InstructionBits() == 0x7d821008) {
+ ->InstructionBits() == 0x7D821008) {
sim_->set_pc(sim_->get_pc() + sizeof(FourByteInstr));
} else {
sim_->ExecuteInstruction(
@@ -273,7 +273,7 @@ void S390Debugger::Debug() {
} else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
// If at a breakpoint, proceed past it.
if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
- ->InstructionBits() == 0x7d821008) {
+ ->InstructionBits() == 0x7D821008) {
sim_->set_pc(sim_->get_pc() + sizeof(FourByteInstr));
} else {
// Execute the one instruction we broke at with breakpoints disabled.
@@ -331,7 +331,7 @@ void S390Debugger::Debug() {
PrintF("%3s: %f 0x%08x %08x\n",
GetRegConfig()->GetDoubleRegisterName(i), dvalue,
static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xffffffff));
+ static_cast<uint32_t>(as_words & 0xFFFFFFFF));
}
} else if (arg1[0] == 'r' &&
(arg1[1] >= '0' && arg1[1] <= '2' &&
@@ -353,7 +353,7 @@ void S390Debugger::Debug() {
uint64_t as_words = bit_cast<uint64_t>(dvalue);
PrintF("%s: %f 0x%08x %08x\n", arg1, dvalue,
static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xffffffff));
+ static_cast<uint32_t>(as_words & 0xFFFFFFFF));
} else {
PrintF("%s unrecognized\n", arg1);
}
@@ -661,6 +661,15 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
+void Simulator::SetRedirectInstruction(Instruction* instruction) {
+// we use TRAP4 here (0xBF22)
+#if V8_TARGET_LITTLE_ENDIAN
+ instruction->SetInstructionBits(0x1000FFB2);
+#else
+ instruction->SetInstructionBits(0xB2FF0000 | kCallRtRedirected);
+#endif
+}
+
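Both literals in SetRedirectInstruction encode the same four bytes in the instruction
stream: on big-endian hosts the word is the TRAP4 opcode 0xB2FF with kCallRtRedirected
in the low halfword, while the little-endian constant 0x1000FFB2 is that word with its
bytes reversed (B2 FF 00 10), which implies kCallRtRedirected is 0x0010 here. Either
way the simulator later recognizes the same trap pattern.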
void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
void* start_addr, size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
@@ -728,15 +737,6 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
}
}
-void Simulator::Initialize(Isolate* isolate) {
- if (isolate->simulator_initialized()) return;
- isolate->set_simulator_initialized(true);
- ::v8::internal::ExternalReference::set_redirector(isolate,
- &RedirectExternalReference);
- static base::OnceType once = V8_ONCE_INIT;
- base::CallOnce(&once, &Simulator::EvalTableInit);
-}
-
Simulator::EvaluateFuncType Simulator::EvalTable[] = {nullptr};
void Simulator::EvalTableInit() {
@@ -1493,7 +1493,8 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
- Initialize(isolate);
+ static base::OnceType once = V8_ONCE_INIT;
+ base::CallOnce(&once, &Simulator::EvalTableInit);
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
#if V8_TARGET_ARCH_S390X
@@ -1538,119 +1539,6 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
Simulator::~Simulator() { free(stack_); }
-// When the generated code calls an external reference we need to catch that in
-// the simulator. The external reference will be a function compiled for the
-// host architecture. We need to call that function instead of trying to
-// execute it with the simulator. We do that by redirecting the external
-// reference to a svc (Supervisor Call) instruction that is handled by
-// the simulator. We write the original destination of the jump just at a known
-// offset from the svc instruction so the simulator knows what to call.
-class Redirection {
- public:
- Redirection(Isolate* isolate, void* external_function,
- ExternalReference::Type type)
- : external_function_(external_function),
-// we use TRAP4 here (0xBF22)
-#if V8_TARGET_LITTLE_ENDIAN
- swi_instruction_(0x1000FFB2),
-#else
- swi_instruction_(0xB2FF0000 | kCallRtRedirected),
-#endif
- type_(type),
- next_(nullptr) {
- next_ = isolate->simulator_redirection();
- Simulator::current(isolate)->FlushICache(
- isolate->simulator_i_cache(),
- reinterpret_cast<void*>(&swi_instruction_), sizeof(FourByteInstr));
- isolate->set_simulator_redirection(this);
- if (ABI_USES_FUNCTION_DESCRIPTORS) {
- function_descriptor_[0] = reinterpret_cast<intptr_t>(&swi_instruction_);
- function_descriptor_[1] = 0;
- function_descriptor_[2] = 0;
- }
- }
-
- void* address() {
- if (ABI_USES_FUNCTION_DESCRIPTORS) {
- return reinterpret_cast<void*>(function_descriptor_);
- } else {
- return reinterpret_cast<void*>(&swi_instruction_);
- }
- }
-
- void* external_function() { return external_function_; }
- ExternalReference::Type type() { return type_; }
-
- static Redirection* Get(Isolate* isolate, void* external_function,
- ExternalReference::Type type) {
- Redirection* current = isolate->simulator_redirection();
- for (; current != nullptr; current = current->next_) {
- if (current->external_function_ == external_function &&
- current->type_ == type) {
- return current;
- }
- }
- return new Redirection(isolate, external_function, type);
- }
-
- static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
- char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
- char* addr_of_redirection =
- addr_of_swi - offsetof(Redirection, swi_instruction_);
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- static Redirection* FromAddress(void* address) {
- int delta = ABI_USES_FUNCTION_DESCRIPTORS
- ? offsetof(Redirection, function_descriptor_)
- : offsetof(Redirection, swi_instruction_);
- char* addr_of_redirection = reinterpret_cast<char*>(address) - delta;
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- static void* ReverseRedirection(intptr_t reg) {
- Redirection* redirection = FromAddress(reinterpret_cast<void*>(reg));
- return redirection->external_function();
- }
-
- static void DeleteChain(Redirection* redirection) {
- while (redirection != nullptr) {
- Redirection* next = redirection->next_;
- delete redirection;
- redirection = next;
- }
- }
-
- private:
- void* external_function_;
- uint32_t swi_instruction_;
- ExternalReference::Type type_;
- Redirection* next_;
- intptr_t function_descriptor_[3];
-};
-
-// static
-void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
- Redirection* first) {
- Redirection::DeleteChain(first);
- if (i_cache != nullptr) {
- for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
- entry = i_cache->Next(entry)) {
- delete static_cast<CachePage*>(entry->value);
- }
- delete i_cache;
- }
-}
-
-void* Simulator::RedirectExternalReference(Isolate* isolate,
- void* external_function,
- ExternalReference::Type type) {
- base::LockGuard<base::Mutex> lock_guard(
- isolate->simulator_redirection_mutex());
- Redirection* redirection = Redirection::Get(isolate, external_function, type);
- return redirection->address();
-}
-
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
@@ -1761,9 +1649,9 @@ void Simulator::SetFpResult(const double& result) {
void Simulator::TrashCallerSaveRegisters() {
// We don't trash the registers with the return value.
#if 0 // A good idea to trash volatile registers, needs to be done
- registers_[2] = 0x50Bad4U;
- registers_[3] = 0x50Bad4U;
- registers_[12] = 0x50Bad4U;
+ registers_[2] = 0x50BAD4U;
+ registers_[3] = 0x50BAD4U;
+ registers_[12] = 0x50BAD4U;
#endif
}
@@ -1884,7 +1772,7 @@ void Simulator::Format(Instruction* instr, const char* format) {
bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
uint32_t uleft = static_cast<uint32_t>(left);
uint32_t uright = static_cast<uint32_t>(right);
- uint32_t urest = 0xffffffffU - uleft;
+ uint32_t urest = 0xFFFFFFFFU - uleft;
return (uright > urest) ||
(carry && (((uright + 1) > urest) || (uright > (urest - 1))));
@@ -1971,7 +1859,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
bool stack_aligned =
(get_register(sp) & (::v8::internal::FLAG_sim_stack_alignment - 1)) ==
0;
- Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ Redirection* redirection = Redirection::FromInstruction(instr);
const int kArgCount = 9;
const int kRegisterArgCount = 5;
int arg0_regnum = 2;
@@ -2342,7 +2230,7 @@ void Simulator::DisableStop(uint32_t code) {
void Simulator::IncreaseStopCounter(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
DCHECK(isWatchedStop(code));
- if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
PrintF(
"Stop counter for code %i has overflowed.\n"
"Enabling this code and reseting the counter to 0.\n",
@@ -2409,7 +2297,7 @@ int16_t Simulator::ByteReverse(int16_t hword) {
#if defined(__GNUC__)
return __builtin_bswap16(hword);
#else
- return (hword << 8) | ((hword >> 8) & 0x00ff);
+ return (hword << 8) | ((hword >> 8) & 0x00FF);
#endif
}
@@ -2418,9 +2306,9 @@ int32_t Simulator::ByteReverse(int32_t word) {
return __builtin_bswap32(word);
#else
int32_t result = word << 24;
- result |= (word << 8) & 0x00ff0000;
- result |= (word >> 8) & 0x0000ff00;
- result |= (word >> 24) & 0x00000ff;
+ result |= (word << 8) & 0x00FF0000;
+ result |= (word >> 8) & 0x0000FF00;
+ result |= (word >> 24) & 0x00000FF;
return result;
#endif
}
@@ -2592,7 +2480,8 @@ void Simulator::CallInternal(byte* entry, int reg_arg_count) {
set_register(r13, r13_val);
}
-intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
+intptr_t Simulator::CallImpl(byte* entry, int argument_count,
+ const intptr_t* arguments) {
// Adjust JS-based stack limit to C-based stack limit.
isolate_->stack_guard()->AdjustStackLimitForSimulator();
@@ -2606,16 +2495,13 @@ intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
int64_t r12_val = get_register(r12);
int64_t r13_val = get_register(r13);
- va_list parameters;
- va_start(parameters, argument_count);
// Set up arguments
// First 5 arguments passed in registers r2-r6.
- int reg_arg_count = (argument_count > 5) ? 5 : argument_count;
+ int reg_arg_count = std::min(5, argument_count);
int stack_arg_count = argument_count - reg_arg_count;
for (int i = 0; i < reg_arg_count; i++) {
- intptr_t value = va_arg(parameters, intptr_t);
- set_register(i + 2, value);
+ set_register(i + 2, arguments[i]);
}
// Remaining arguments passed on stack.
@@ -2631,11 +2517,8 @@ intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
// Store remaining arguments on stack, from low to high memory.
intptr_t* stack_argument =
reinterpret_cast<intptr_t*>(entry_stack + kCalleeRegisterSaveAreaSize);
- for (int i = 0; i < stack_arg_count; i++) {
- intptr_t value = va_arg(parameters, intptr_t);
- stack_argument[i] = value;
- }
- va_end(parameters);
+ memcpy(stack_argument, arguments + reg_arg_count,
+ stack_arg_count * sizeof(*arguments));
set_register(sp, entry_stack);
// Prepare to execute the code at entry
@@ -2716,8 +2599,7 @@ intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
set_register(sp, original_stack);
// Return value register
- intptr_t result = get_register(r2);
- return result;
+ return get_register(r2);
}
void Simulator::CallFP(byte* entry, double d0, double d1) {
@@ -3663,7 +3545,7 @@ EVALUATE(EX) {
char new_instr_buf[8];
char* addr = reinterpret_cast<char*>(&new_instr_buf[0]);
- the_instr |= static_cast<SixByteInstr>(r1_val & 0xff)
+ the_instr |= static_cast<SixByteInstr>(r1_val & 0xFF)
<< (8 * inst_length - 16);
Instruction::SetInstructionBits<SixByteInstr>(
reinterpret_cast<byte*>(addr), static_cast<SixByteInstr>(the_instr));
@@ -4004,9 +3886,9 @@ EVALUATE(BXH) {
DECODE_RS_A_INSTRUCTION(r1, r3, b2, d2);
// r1_val is the first operand, r3_val is the increment
- int32_t r1_val = r1 == 0 ? 0 : get_register(r1);
- int32_t r3_val = r2 == 0 ? 0 : get_register(r3);
- intptr_t b2_val = b2 == 0 ? 0 : get_register(b2);
+ int32_t r1_val = (r1 == 0) ? 0 : get_register(r1);
+ int32_t r3_val = (r3 == 0) ? 0 : get_register(r3);
+ intptr_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t branch_address = b2_val + d2;
// increment r1_val
r1_val += r3_val;
@@ -5464,7 +5346,7 @@ EVALUATE(TRAP4) {
int64_t sp_addr = get_register(sp);
for (int i = 0; i < kCalleeRegisterSaveAreaSize / kPointerSize; ++i) {
    // we don't want to whack the RA (r14)
- if (i != 14) (reinterpret_cast<intptr_t*>(sp_addr))[i] = 0xdeadbabe;
+ if (i != 14) (reinterpret_cast<intptr_t*>(sp_addr))[i] = 0xDEADBABE;
}
SoftwareInterrupt(instr);
return length;
@@ -6948,7 +6830,7 @@ EVALUATE(LLGFR) {
DCHECK_OPCODE(LLGFR);
DECODE_RRE_INSTRUCTION(r1, r2);
int32_t r2_val = get_low_register<int32_t>(r2);
- uint64_t r2_finalval = (static_cast<uint64_t>(r2_val) & 0x00000000ffffffff);
+ uint64_t r2_finalval = (static_cast<uint64_t>(r2_val) & 0x00000000FFFFFFFF);
set_register(r1, r2_finalval);
return length;
}
@@ -8017,8 +7899,8 @@ EVALUATE(LRVH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = b2_val + x2_val + d2;
int16_t mem_val = ReadH(mem_addr, instr);
- int32_t result = ByteReverse(mem_val) & 0x0000ffff;
- result |= r1_val & 0xffff0000;
+ int32_t result = ByteReverse(mem_val) & 0x0000FFFF;
+ result |= r1_val & 0xFFFF0000;
set_low_register(r1, result);
return length;
}
diff --git a/deps/v8/src/s390/simulator-s390.h b/deps/v8/src/s390/simulator-s390.h
index a214b198df..1ff8020e6a 100644
--- a/deps/v8/src/s390/simulator-s390.h
+++ b/deps/v8/src/s390/simulator-s390.h
@@ -5,7 +5,7 @@
// Declares a Simulator for S390 instructions if we are not generating a native
// S390 binary. This Simulator allows us to run and debug S390 code generation
// on regular desktop machines.
-// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// V8 calls into generated code via the GeneratedCode wrapper,
// which will start execution in the Simulator or forward to the real entry
// on an S390 hardware platform.
@@ -14,56 +14,13 @@
#include "src/allocation.h"
-#if !defined(USE_SIMULATOR)
-// Running without a simulator on a native s390 platform.
-
-namespace v8 {
-namespace internal {
-
-// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-typedef int (*s390_regexp_matcher)(String*, int, const byte*, const byte*, int*,
- int, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type ppc_regexp_matcher.
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- (FUNCTION_CAST<s390_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
- p8))
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on s390 uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- USE(isolate);
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
- USE(isolate);
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
- USE(isolate);
- }
-};
-} // namespace internal
-} // namespace v8
-
-#else // !defined(USE_SIMULATOR)
+#if defined(USE_SIMULATOR)
// Running with a simulator.
#include "src/assembler.h"
#include "src/base/hashmap.h"
#include "src/s390/constants-s390.h"
+#include "src/simulator-base.h"
namespace v8 {
namespace internal {
@@ -94,7 +51,7 @@ class CachePage {
char validity_map_[kValidityMapSize]; // One byte per line.
};
-class Simulator {
+class Simulator : public SimulatorBase {
public:
friend class S390Debugger;
enum Register {
@@ -206,15 +163,11 @@ class Simulator {
// Executes S390 instructions until the PC reaches end_sim_pc.
void Execute();
- // Call on program start.
- static void Initialize(Isolate* isolate);
-
- static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
+ template <typename Return, typename... Args>
+ Return Call(byte* entry, Args... args) {
+ return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
+ }
- // V8 generally calls into generated JS code with 5 parameters and into
- // generated RegExp code with 7 parameters. This is a convenience function,
- // which sets up the simulator state and grabs the result on return.
- intptr_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
void CallFP(byte* entry, double d0, double d1);
int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
@@ -230,6 +183,9 @@ class Simulator {
void set_last_debugger_input(char* input);
char* last_debugger_input() { return last_debugger_input_; }
+ // Redirection support.
+ static void SetRedirectInstruction(Instruction* instruction);
+
// ICache checking.
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -250,6 +206,8 @@ class Simulator {
end_sim_pc = -2
};
+ intptr_t CallImpl(byte* entry, int argument_count, const intptr_t* arguments);
+
// Unsupported instructions use Format to print an error and stop execution.
void Format(Instruction* instr, const char* format);
@@ -440,11 +398,6 @@ class Simulator {
static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
void* page);
- // Runtime call support. Uses the isolate in a thread-safe way.
- static void* RedirectExternalReference(
- Isolate* isolate, void* external_function,
- v8::internal::ExternalReference::Type type);
-
// Handle arguments and return value for runtime FP functions.
void GetFpArgs(double* x, double* y, intptr_t* z);
void SetFpResult(const double& result);
@@ -1248,43 +1201,8 @@ class Simulator {
#undef EVALUATE
};
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
- FUNCTION_ADDR(entry), 5, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, \
- (intptr_t)p3, (intptr_t)p4))
-
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- Simulator::current(isolate)->Call( \
- entry, 9, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, (intptr_t)p3, \
- (intptr_t)p4, (intptr_t)p5, (intptr_t)p6, (intptr_t)p7, (intptr_t)p8)
-
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. The JS-based limit normally points near the end of
-// the simulator stack. When the C-based limit is exhausted we reflect that by
-// lowering the JS-based limit as well, to make stack checks trigger.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit(c_limit);
- }
-
- static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(isolate);
- return sim->PushAddress(try_catch_address);
- }
-
- static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
- Simulator::current(isolate)->PopAddress();
- }
-};
-
} // namespace internal
} // namespace v8
-#endif // !defined(USE_SIMULATOR)
+#endif // defined(USE_SIMULATOR)
#endif // V8_S390_SIMULATOR_S390_H_
diff --git a/deps/v8/src/safepoint-table.cc b/deps/v8/src/safepoint-table.cc
index 06a6888465..83031a2f36 100644
--- a/deps/v8/src/safepoint-table.cc
+++ b/deps/v8/src/safepoint-table.cc
@@ -52,9 +52,7 @@ SafepointTable::SafepointTable(Address instruction_start,
SafepointTable::SafepointTable(Code* code)
: SafepointTable(code->instruction_start(), code->safepoint_table_offset(),
- code->stack_slots(), true) {
- DCHECK(code->is_turbofanned());
-}
+ code->stack_slots(), true) {}
unsigned SafepointTable::find_return_pc(unsigned pc_offset) {
for (unsigned i = 0; i < length(); i++) {
@@ -134,28 +132,20 @@ Safepoint SafepointTableBuilder::DefineSafepoint(
int arguments,
Safepoint::DeoptMode deopt_mode) {
DCHECK_GE(arguments, 0);
- DeoptimizationInfo info;
- info.pc = assembler->pc_offset();
- info.arguments = arguments;
- info.has_doubles = (kind & Safepoint::kWithDoubles);
- info.trampoline = -1;
- deoptimization_info_.Add(info, zone_);
- deopt_index_list_.Add(Safepoint::kNoDeoptimizationIndex, zone_);
+ deoptimization_info_.Add(
+ DeoptimizationInfo(zone_, assembler->pc_offset(), arguments, kind),
+ zone_);
if (deopt_mode == Safepoint::kNoLazyDeopt) {
- last_lazy_safepoint_ = deopt_index_list_.length();
+ last_lazy_safepoint_ = deoptimization_info_.length();
}
- indexes_.Add(new(zone_) ZoneList<int>(8, zone_), zone_);
- registers_.Add((kind & Safepoint::kWithRegisters)
- ? new (zone_) ZoneList<int>(4, zone_)
- : nullptr,
- zone_);
- return Safepoint(indexes_.last(), registers_.last());
+ DeoptimizationInfo& new_info = deoptimization_info_.last();
+ return Safepoint(new_info.indexes, new_info.registers);
}
void SafepointTableBuilder::RecordLazyDeoptimizationIndex(int index) {
- while (last_lazy_safepoint_ < deopt_index_list_.length()) {
- deopt_index_list_[last_lazy_safepoint_++] = index;
+ while (last_lazy_safepoint_ < deoptimization_info_.length()) {
+ deoptimization_info_[last_lazy_safepoint_++].deopt_index = index;
}
}
@@ -201,17 +191,17 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
// Emit sorted table of pc offsets together with deoptimization indexes.
for (int i = 0; i < length; i++) {
- assembler->dd(deoptimization_info_[i].pc);
- assembler->dd(EncodeExceptPC(deoptimization_info_[i],
- deopt_index_list_[i]));
- assembler->dd(deoptimization_info_[i].trampoline);
+ const DeoptimizationInfo& info = deoptimization_info_[i];
+ assembler->dd(info.pc);
+ assembler->dd(EncodeExceptPC(info));
+ assembler->dd(info.trampoline);
}
// Emit table of bitmaps.
ZoneList<uint8_t> bits(bytes_per_entry, zone_);
for (int i = 0; i < length; i++) {
- ZoneList<int>* indexes = indexes_[i];
- ZoneList<int>* registers = registers_[i];
+ ZoneList<int>* indexes = deoptimization_info_[i].indexes;
+ ZoneList<int>* registers = deoptimization_info_[i].registers;
bits.Clear();
bits.AddBlock(0, bytes_per_entry, zone_);
@@ -248,13 +238,10 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
emitted_ = true;
}
-
-uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info,
- unsigned index) {
- uint32_t encoding = SafepointEntry::DeoptimizationIndexField::encode(index);
- encoding |= SafepointEntry::ArgumentsField::encode(info.arguments);
- encoding |= SafepointEntry::SaveDoublesField::encode(info.has_doubles);
- return encoding;
+uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info) {
+ return SafepointEntry::DeoptimizationIndexField::encode(info.deopt_index) |
+ SafepointEntry::ArgumentsField::encode(info.arguments) |
+ SafepointEntry::SaveDoublesField::encode(info.has_doubles);
}
void SafepointTableBuilder::RemoveDuplicates() {
@@ -264,44 +251,36 @@ void SafepointTableBuilder::RemoveDuplicates() {
// pointers and without deoptimization info.
int length = deoptimization_info_.length();
- DCHECK_EQ(length, deopt_index_list_.length());
- DCHECK_EQ(length, indexes_.length());
- DCHECK_EQ(length, registers_.length());
-
if (length < 2) return;
// Check that all entries (1, length] are identical to entry 0.
+ const DeoptimizationInfo& first_info = deoptimization_info_[0];
for (int i = 1; i < length; ++i) {
- if (!IsIdenticalExceptForPc(0, i)) return;
+ if (!IsIdenticalExceptForPc(first_info, deoptimization_info_[i])) return;
}
- // If we get here, all entries were identical. Rewind all lists to just one
+ // If we get here, all entries were identical. Rewind the list to just one
// entry, and set the pc to kMaxUInt32.
deoptimization_info_.Rewind(1);
- deopt_index_list_.Rewind(1);
- indexes_.Rewind(1);
- registers_.Rewind(1);
deoptimization_info_[0].pc = kMaxUInt32;
}
-bool SafepointTableBuilder::IsIdenticalExceptForPc(int index1,
- int index2) const {
- DeoptimizationInfo& deopt_info_1 = deoptimization_info_[index1];
- DeoptimizationInfo& deopt_info_2 = deoptimization_info_[index2];
- if (deopt_info_1.arguments != deopt_info_2.arguments) return false;
- if (deopt_info_1.has_doubles != deopt_info_2.has_doubles) return false;
+bool SafepointTableBuilder::IsIdenticalExceptForPc(
+ const DeoptimizationInfo& info1, const DeoptimizationInfo& info2) const {
+ if (info1.arguments != info2.arguments) return false;
+ if (info1.has_doubles != info2.has_doubles) return false;
- if (deopt_index_list_[index1] != deopt_index_list_[index2]) return false;
+ if (info1.deopt_index != info2.deopt_index) return false;
- ZoneList<int>* indexes1 = indexes_[index1];
- ZoneList<int>* indexes2 = indexes_[index2];
+ ZoneList<int>* indexes1 = info1.indexes;
+ ZoneList<int>* indexes2 = info2.indexes;
if (indexes1->length() != indexes2->length()) return false;
for (int i = 0; i < indexes1->length(); ++i) {
if (indexes1->at(i) != indexes2->at(i)) return false;
}
- ZoneList<int>* registers1 = registers_[index1];
- ZoneList<int>* registers2 = registers_[index2];
+ ZoneList<int>* registers1 = info1.registers;
+ ZoneList<int>* registers2 = info2.registers;
if (registers1) {
if (!registers2) return false;
if (registers1->length() != registers2->length()) return false;
diff --git a/deps/v8/src/safepoint-table.h b/deps/v8/src/safepoint-table.h
index 9f063bac20..5c6b413fa1 100644
--- a/deps/v8/src/safepoint-table.h
+++ b/deps/v8/src/safepoint-table.h
@@ -194,8 +194,8 @@ class Safepoint BASE_EMBEDDED {
private:
Safepoint(ZoneList<int>* indexes, ZoneList<int>* registers)
: indexes_(indexes), registers_(registers) {}
- ZoneList<int>* indexes_;
- ZoneList<int>* registers_;
+ ZoneList<int>* const indexes_;
+ ZoneList<int>* const registers_;
friend class SafepointTableBuilder;
};
@@ -205,9 +205,6 @@ class SafepointTableBuilder BASE_EMBEDDED {
public:
explicit SafepointTableBuilder(Zone* zone)
: deoptimization_info_(32, zone),
- deopt_index_list_(32, zone),
- indexes_(32, zone),
- registers_(32, zone),
emitted_(false),
last_lazy_safepoint_(0),
zone_(zone) { }
@@ -225,7 +222,7 @@ class SafepointTableBuilder BASE_EMBEDDED {
// outstanding safepoints.
void RecordLazyDeoptimizationIndex(int index);
void BumpLastLazySafepointIndex() {
- last_lazy_safepoint_ = deopt_index_list_.length();
+ last_lazy_safepoint_ = deoptimization_info_.length();
}
// Emit the safepoint table after the body. The number of bits per
@@ -244,18 +241,30 @@ class SafepointTableBuilder BASE_EMBEDDED {
unsigned arguments;
bool has_doubles;
int trampoline;
+ ZoneList<int>* indexes;
+ ZoneList<int>* registers;
+ unsigned deopt_index;
+ DeoptimizationInfo(Zone* zone, unsigned pc, unsigned arguments,
+ Safepoint::Kind kind)
+ : pc(pc),
+ arguments(arguments),
+ has_doubles(kind & Safepoint::kWithDoubles),
+ trampoline(-1),
+ indexes(new (zone) ZoneList<int>(8, zone)),
+ registers(kind & Safepoint::kWithRegisters
+ ? new (zone) ZoneList<int>(4, zone)
+ : nullptr),
+ deopt_index(Safepoint::kNoDeoptimizationIndex) {}
};
- uint32_t EncodeExceptPC(const DeoptimizationInfo& info, unsigned index);
+ uint32_t EncodeExceptPC(const DeoptimizationInfo&);
- bool IsIdenticalExceptForPc(int index1, int index2) const;
+ bool IsIdenticalExceptForPc(const DeoptimizationInfo&,
+ const DeoptimizationInfo&) const;
// If all entries are identical, replace them by 1 entry with pc = kMaxUInt32.
void RemoveDuplicates();
ZoneList<DeoptimizationInfo> deoptimization_info_;
- ZoneList<unsigned> deopt_index_list_;
- ZoneList<ZoneList<int>*> indexes_;
- ZoneList<ZoneList<int>*> registers_;
unsigned offset_;
bool emitted_;
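The safepoint-table change above folds the parallel deopt-index, indexes, and registers lists into a single DeoptimizationInfo entry, and EncodeExceptPC() packs deopt_index, arguments, and has_doubles into one 32-bit word via BitField encodings. A standalone sketch of that packing idea; the field widths below are invented for illustration and do not match SafepointEntry's actual BitField layout:

#include <cassert>
#include <cstdint>

// Illustrative layout only: 16 bits of deopt index, 8 bits of argument
// count, 1 bit for save-doubles. The real widths come from SafepointEntry.
constexpr uint32_t kIndexBits = 16;
constexpr uint32_t kArgsBits = 8;

uint32_t Encode(uint32_t deopt_index, uint32_t arguments, bool has_doubles) {
  return deopt_index | (arguments << kIndexBits) |
         (static_cast<uint32_t>(has_doubles) << (kIndexBits + kArgsBits));
}

int main() {
  uint32_t word = Encode(1234, 5, true);
  assert((word & 0xFFFF) == 1234);                        // deopt index
  assert(((word >> kIndexBits) & 0xFF) == 5);             // argument count
  assert(((word >> (kIndexBits + kArgsBits)) & 1) == 1);  // save doubles
}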
diff --git a/deps/v8/src/simulator-base.cc b/deps/v8/src/simulator-base.cc
new file mode 100644
index 0000000000..72a5daefce
--- /dev/null
+++ b/deps/v8/src/simulator-base.cc
@@ -0,0 +1,95 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/simulator-base.h"
+
+#include "src/assembler.h"
+#include "src/isolate.h"
+#include "src/simulator.h"
+
+#if defined(USE_SIMULATOR)
+
+namespace v8 {
+namespace internal {
+
+// static
+base::Mutex* SimulatorBase::redirection_mutex_ = nullptr;
+
+// static
+Redirection* SimulatorBase::redirection_ = nullptr;
+
+// static
+void SimulatorBase::InitializeOncePerProcess() {
+ DCHECK_NULL(redirection_mutex_);
+ redirection_mutex_ = new base::Mutex();
+}
+
+// static
+void SimulatorBase::GlobalTearDown() {
+ delete redirection_mutex_;
+ redirection_mutex_ = nullptr;
+
+ Redirection::DeleteChain(redirection_);
+ redirection_ = nullptr;
+}
+
+// static
+void SimulatorBase::Initialize(Isolate* isolate) {
+ ExternalReference::set_redirector(isolate, &RedirectExternalReference);
+}
+
+// static
+void SimulatorBase::TearDown(base::CustomMatcherHashMap* i_cache) {
+ if (i_cache != nullptr) {
+ for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
+ entry = i_cache->Next(entry)) {
+ delete static_cast<CachePage*>(entry->value);
+ }
+ delete i_cache;
+ }
+}
+
+// static
+void* SimulatorBase::RedirectExternalReference(Isolate* isolate,
+ void* external_function,
+ ExternalReference::Type type) {
+ base::LockGuard<base::Mutex> lock_guard(Simulator::redirection_mutex());
+ Redirection* redirection = Redirection::Get(isolate, external_function, type);
+ return redirection->address_of_instruction();
+}
+
+Redirection::Redirection(Isolate* isolate, void* external_function,
+ ExternalReference::Type type)
+ : external_function_(external_function), type_(type), next_(nullptr) {
+ next_ = Simulator::redirection();
+ Simulator::SetRedirectInstruction(
+ reinterpret_cast<Instruction*>(address_of_instruction()));
+ Simulator::FlushICache(isolate->simulator_i_cache(),
+ reinterpret_cast<void*>(&instruction_),
+ sizeof(instruction_));
+ Simulator::set_redirection(this);
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ function_descriptor_[0] = reinterpret_cast<intptr_t>(&instruction_);
+ function_descriptor_[1] = 0;
+ function_descriptor_[2] = 0;
+#endif
+}
+
+// static
+Redirection* Redirection::Get(Isolate* isolate, void* external_function,
+ ExternalReference::Type type) {
+ Redirection* current = Simulator::redirection();
+ for (; current != nullptr; current = current->next_) {
+ if (current->external_function_ == external_function &&
+ current->type_ == type) {
+ return current;
+ }
+ }
+ return new Redirection(isolate, external_function, type);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // defined(USE_SIMULATOR)
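The redirection scheme introduced above recovers the enclosing Redirection object from the address of its embedded trapping instruction. A minimal standalone sketch of that offsetof pattern, with made-up names (FakeRedirection, FromInstructionAddress) standing in for the real V8 types:

#include <cassert>
#include <cstddef>
#include <cstdint>

struct FakeRedirection {
  void* external_function;
  uint32_t instruction;  // stands in for the trapping instruction the simulator hits
};

// Step back from the instruction's address by its field offset to reach the
// object that owns it -- the same idea as Redirection::FromInstruction().
FakeRedirection* FromInstructionAddress(void* pc) {
  char* addr =
      reinterpret_cast<char*>(pc) - offsetof(FakeRedirection, instruction);
  return reinterpret_cast<FakeRedirection*>(addr);
}

int main() {
  FakeRedirection r{nullptr, 0};
  assert(FromInstructionAddress(&r.instruction) == &r);
}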
diff --git a/deps/v8/src/simulator-base.h b/deps/v8/src/simulator-base.h
new file mode 100644
index 0000000000..27dc87d050
--- /dev/null
+++ b/deps/v8/src/simulator-base.h
@@ -0,0 +1,163 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SIMULATOR_BASE_H_
+#define V8_SIMULATOR_BASE_H_
+
+#include <type_traits>
+
+#include "src/assembler.h"
+#include "src/globals.h"
+
+#if defined(USE_SIMULATOR)
+
+namespace v8 {
+namespace internal {
+
+class Instruction;
+class Redirection;
+
+class SimulatorBase {
+ public:
+ // Call on process start and exit.
+ static void InitializeOncePerProcess();
+ static void GlobalTearDown();
+
+ // Call on isolate initialization and teardown.
+ static void Initialize(Isolate* isolate);
+ static void TearDown(base::CustomMatcherHashMap* i_cache);
+
+ static base::Mutex* redirection_mutex() { return redirection_mutex_; }
+ static Redirection* redirection() { return redirection_; }
+ static void set_redirection(Redirection* r) { redirection_ = r; }
+
+ protected:
+ template <typename Return, typename SimT, typename CallImpl, typename... Args>
+ static Return VariadicCall(SimT* sim, CallImpl call, byte* entry,
+ Args... args) {
+ // Convert all arguments to intptr_t. Fails if any argument is not integral
+ // or pointer.
+ std::array<intptr_t, sizeof...(args)> args_arr{ConvertArg(args)...};
+ intptr_t ret = (sim->*call)(entry, args_arr.size(), args_arr.data());
+ return ConvertReturn<Return>(ret);
+ }
+
+ private:
+ // Runtime call support. Uses the isolate in a thread-safe way.
+ static void* RedirectExternalReference(Isolate* isolate,
+ void* external_function,
+ ExternalReference::Type type);
+
+ static base::Mutex* redirection_mutex_;
+ static Redirection* redirection_;
+
+ // Helper methods to convert arbitrary integer or pointer arguments to the
+ // needed generic argument type intptr_t.
+
+ // Convert integral argument to intptr_t.
+ template <typename T>
+ static typename std::enable_if<std::is_integral<T>::value, intptr_t>::type
+ ConvertArg(T arg) {
+ static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize");
+ return static_cast<intptr_t>(arg);
+ }
+
+ // Convert pointer-typed argument to intptr_t.
+ template <typename T>
+ static typename std::enable_if<std::is_pointer<T>::value, intptr_t>::type
+ ConvertArg(T arg) {
+ return reinterpret_cast<intptr_t>(arg);
+ }
+
+ // Convert back integral return types.
+ template <typename T>
+ static typename std::enable_if<std::is_integral<T>::value, T>::type
+ ConvertReturn(intptr_t ret) {
+ static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize");
+ return static_cast<T>(ret);
+ }
+
+ // Convert back pointer-typed return types.
+ template <typename T>
+ static typename std::enable_if<std::is_pointer<T>::value, T>::type
+ ConvertReturn(intptr_t ret) {
+ return reinterpret_cast<T>(ret);
+ }
+
+ // Convert back void return type (i.e. no return).
+ template <typename T>
+ static typename std::enable_if<std::is_void<T>::value, T>::type ConvertReturn(
+ intptr_t ret) {}
+};
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a trapping instruction that is handled by the simulator. We
+// write the original destination of the jump just at a known offset from the
+// trapping instruction so the simulator knows what to call.
+//
+// The following are trapping instructions used for various architectures:
+// - V8_TARGET_ARCH_ARM: svc (Supervisor Call)
+// - V8_TARGET_ARCH_ARM64: svc (Supervisor Call)
+// - V8_TARGET_ARCH_MIPS: swi (software-interrupt)
+// - V8_TARGET_ARCH_MIPS64: swi (software-interrupt)
+// - V8_TARGET_ARCH_PPC: svc (Supervisor Call)
+// - V8_TARGET_ARCH_S390: svc (Supervisor Call)
+class Redirection {
+ public:
+ Redirection(Isolate* isolate, void* external_function,
+ ExternalReference::Type type);
+
+ Address address_of_instruction() {
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ return reinterpret_cast<Address>(function_descriptor_);
+#else
+ return reinterpret_cast<Address>(&instruction_);
+#endif
+ }
+
+ void* external_function() { return external_function_; }
+ ExternalReference::Type type() { return type_; }
+
+ static Redirection* Get(Isolate* isolate, void* external_function,
+ ExternalReference::Type type);
+
+ static Redirection* FromInstruction(Instruction* instruction) {
+ Address addr_of_instruction = reinterpret_cast<Address>(instruction);
+ Address addr_of_redirection =
+ addr_of_instruction - offsetof(Redirection, instruction_);
+ return reinterpret_cast<Redirection*>(addr_of_redirection);
+ }
+
+ static void* ReverseRedirection(intptr_t reg) {
+ Redirection* redirection = FromInstruction(
+ reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
+ return redirection->external_function();
+ }
+
+ static void DeleteChain(Redirection* redirection) {
+ while (redirection != nullptr) {
+ Redirection* next = redirection->next_;
+ delete redirection;
+ redirection = next;
+ }
+ }
+
+ private:
+ void* external_function_;
+ uint32_t instruction_;
+ ExternalReference::Type type_;
+ Redirection* next_;
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ intptr_t function_descriptor_[3];
+#endif
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // defined(USE_SIMULATOR)
+#endif // V8_SIMULATOR_BASE_H_
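SimulatorBase::VariadicCall above marshals an arbitrary list of integral and pointer arguments into an array of machine words before dispatching to CallImpl. A rough standalone sketch of that conversion step, under invented names (ToWord, PackArgs); it omits the return-value conversion the real helper also performs:

#include <array>
#include <cstdint>
#include <cstdio>
#include <type_traits>

template <typename T>
typename std::enable_if<std::is_integral<T>::value, intptr_t>::type
ToWord(T arg) {
  return static_cast<intptr_t>(arg);
}

template <typename T>
typename std::enable_if<std::is_pointer<T>::value, intptr_t>::type
ToWord(T arg) {
  return reinterpret_cast<intptr_t>(arg);
}

// Pack a heterogeneous argument list into a fixed array of machine words,
// the form a CallImpl-style entry point expects.
template <typename... Args>
std::array<intptr_t, sizeof...(Args)> PackArgs(Args... args) {
  return {ToWord(args)...};
}

int main() {
  int x = 7;
  auto words = PackArgs(42, &x, 'a');
  std::printf("%zu words packed\n", words.size());
}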
diff --git a/deps/v8/src/simulator.h b/deps/v8/src/simulator.h
index 6eab8cf976..a2af7f59d5 100644
--- a/deps/v8/src/simulator.h
+++ b/deps/v8/src/simulator.h
@@ -5,6 +5,9 @@
#ifndef V8_SIMULATOR_H_
#define V8_SIMULATOR_H_
+#include "src/globals.h"
+#include "src/objects/code.h"
+
#if V8_TARGET_ARCH_IA32
#include "src/ia32/simulator-ia32.h"
#elif V8_TARGET_ARCH_X64
@@ -25,4 +28,109 @@
#error Unsupported target architecture.
#endif
+namespace v8 {
+namespace internal {
+
+#if defined(USE_SIMULATOR)
+// Running with a simulator.
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code. The JS-based limit normally points near the end of
+// the simulator stack. When the C-based limit is exhausted we reflect that by
+// lowering the JS-based limit as well, to make stack checks trigger.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+ uintptr_t c_limit) {
+ return Simulator::current(isolate)->StackLimit(c_limit);
+ }
+
+ static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ return Simulator::current(isolate)->PushAddress(try_catch_address);
+ }
+
+ static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ Simulator::current(isolate)->PopAddress();
+ }
+};
+
+#else // defined(USE_SIMULATOR)
+// Running without a simulator on a native platform.
+
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code uses the C stack, we just use
+// the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+ uintptr_t c_limit) {
+ USE(isolate);
+ return c_limit;
+ }
+
+ static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
+ return try_catch_address;
+ }
+
+ static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ USE(isolate);
+ }
+};
+
+#endif // defined(USE_SIMULATOR)
+
+// Use this class either as {GeneratedCode<ret, arg1, arg2>} or
+// {GeneratedCode<ret(arg1, arg2)>} (see specialization below).
+template <typename Return, typename... Args>
+class GeneratedCode {
+ public:
+ using Signature = Return(Args...);
+
+ template <typename T>
+ static GeneratedCode FromAddress(Isolate* isolate, T* addr) {
+ return GeneratedCode(isolate, reinterpret_cast<Signature*>(addr));
+ }
+
+ static GeneratedCode FromCode(Code* code) {
+ return FromAddress(code->GetIsolate(), code->entry());
+ }
+
+#ifdef USE_SIMULATOR
+ // Defined in simulator-base.h.
+ Return Call(Args... args) {
+ return Simulator::current(isolate_)->template Call<Return>(
+ reinterpret_cast<byte*>(fn_ptr_), args...);
+ }
+#else
+ DISABLE_CFI_ICALL Return Call(Args... args) {
+ // When running without a simulator we call the entry directly.
+ return fn_ptr_(args...);
+ }
+#endif
+
+ private:
+ friend class GeneratedCode<Return(Args...)>;
+ Isolate* isolate_;
+ Signature* fn_ptr_;
+ GeneratedCode(Isolate* isolate, Signature* fn_ptr)
+ : isolate_(isolate), fn_ptr_(fn_ptr) {}
+};
+
+// Allow to use {GeneratedCode<ret(arg1, arg2)>} instead of
+// {GeneratedCode<ret, arg1, arg2>}.
+template <typename Return, typename... Args>
+class GeneratedCode<Return(Args...)> : public GeneratedCode<Return, Args...> {
+ public:
+ // Automatically convert from {GeneratedCode<ret, arg1, arg2>} to
+ // {GeneratedCode<ret(arg1, arg2)>}.
+ GeneratedCode(GeneratedCode<Return, Args...> other)
+ : GeneratedCode<Return, Args...>(other.isolate_, other.fn_ptr_) {}
+};
+
+} // namespace internal
+} // namespace v8
+
#endif // V8_SIMULATOR_H_
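The two spellings of GeneratedCode shown above hinge on a partial specialization over a function type, so that GeneratedCode<ret(arg1, arg2)> converts from GeneratedCode<ret, arg1, arg2>. A self-contained sketch of that template trick, with a hypothetical Wrapper class and a plain function pointer standing in for generated code:

#include <cstdio>

template <typename Return, typename... Args>
class Wrapper {
 public:
  using Signature = Return(Args...);
  explicit Wrapper(Signature* fn) : fn_(fn) {}
  Return Call(Args... args) { return fn_(args...); }
  Signature* fn_;
};

// Allow Wrapper<int(int, int)> to be spelled instead of Wrapper<int, int, int>.
template <typename Return, typename... Args>
class Wrapper<Return(Args...)> : public Wrapper<Return, Args...> {
 public:
  Wrapper(Wrapper<Return, Args...> other)
      : Wrapper<Return, Args...>(other.fn_) {}
};

int Add(int a, int b) { return a + b; }

int main() {
  Wrapper<int(int, int)> add = Wrapper<int, int, int>(Add);
  std::printf("%d\n", add.Call(2, 3));
}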
diff --git a/deps/v8/src/snapshot/builtin-deserializer-allocator.h b/deps/v8/src/snapshot/builtin-deserializer-allocator.h
index 6fc7bfaf6b..eb04b54025 100644
--- a/deps/v8/src/snapshot/builtin-deserializer-allocator.h
+++ b/deps/v8/src/snapshot/builtin-deserializer-allocator.h
@@ -69,7 +69,7 @@ class BuiltinDeserializerAllocator final {
// Builtin deserialization does not bake reservations into the snapshot, hence
// this is a nop.
- void DecodeReservation(Vector<const SerializedData::Reservation> res) {}
+ void DecodeReservation(std::vector<SerializedData::Reservation> res) {}
// These methods are used to pre-allocate builtin objects prior to
// deserialization.
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 3350ef3c0f..4210845573 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -82,7 +82,7 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
case Code::REGEXP: // No regexp literals initialized yet.
case Code::NUMBER_OF_KINDS: // Pseudo enum value.
case Code::BYTECODE_HANDLER: // No direct references to handlers.
- CHECK(false);
+ break; // hit UNREACHABLE below.
case Code::BUILTIN:
SerializeBuiltinReference(code_object, how_to_code, where_to_point, 0);
return;
@@ -106,12 +106,37 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
if (obj->IsScript()) {
+ Script* script_obj = Script::cast(obj);
+ DCHECK_NE(script_obj->compilation_type(), Script::COMPILATION_TYPE_EVAL);
// Wrapper object is a context-dependent JSValue. Reset it here.
- Script::cast(obj)->set_wrapper(isolate()->heap()->undefined_value());
+ script_obj->set_wrapper(isolate()->heap()->undefined_value());
+ // We want to differentiate between undefined and uninitialized_symbol for
+    // context_data for now. It is a hack to allow debugging for scripts that
+    // are included as part of a custom snapshot. (see debug::Script::IsEmbedded())
+ Object* context_data = script_obj->context_data();
+ if (context_data != isolate()->heap()->undefined_value() &&
+ context_data != isolate()->heap()->uninitialized_symbol()) {
+ script_obj->set_context_data(isolate()->heap()->undefined_value());
+ }
+    // We don't want to serialize host options to avoid serializing an
+    // unnecessary object graph.
+ FixedArray* host_options = script_obj->host_defined_options();
+ script_obj->set_host_defined_options(
+ isolate()->heap()->empty_fixed_array());
+ SerializeGeneric(obj, how_to_code, where_to_point);
+ script_obj->set_host_defined_options(host_options);
+ script_obj->set_context_data(context_data);
+ return;
}
if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
+ // TODO(7110): Enable serializing of Asm modules once the AsmWasmData
+ // is context independent.
+ DCHECK(!sfi->IsApiFunction() && !sfi->HasAsmWasmData());
+ // Do not serialize when a debugger is active.
+ DCHECK(sfi->debug_info()->IsSmi());
+
// Mark SFI to indicate whether the code is cached.
bool was_deserialized = sfi->deserialized();
sfi->set_deserialized(sfi->is_compiled());
@@ -120,6 +145,11 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
return;
}
+ if (obj->IsBytecodeArray()) {
+ // Clear the stack frame cache if present
+ BytecodeArray::cast(obj)->ClearFrameCacheFromSourcePositionTable();
+ }
+
// Past this point we should not see any (context-specific) maps anymore.
CHECK(!obj->IsMap());
// There should be no references to the global object embedded.
@@ -221,8 +251,9 @@ std::unique_ptr<ScriptData> WasmCompiledModuleSerializer::SerializeWasmModule(
Isolate* isolate, Handle<FixedArray> input) {
Handle<WasmCompiledModule> compiled_module =
Handle<WasmCompiledModule>::cast(input);
- WasmCompiledModuleSerializer wasm_cs(isolate, 0, isolate->native_context(),
- handle(compiled_module->module_bytes()));
+ WasmCompiledModuleSerializer wasm_cs(
+ isolate, 0, isolate->native_context(),
+ handle(compiled_module->shared()->module_bytes()));
ScriptData* data = wasm_cs.Serialize(compiled_module);
return std::unique_ptr<ScriptData>(data);
}
@@ -432,11 +463,13 @@ ScriptData* SerializedCodeData::GetScriptData() {
return result;
}
-Vector<const SerializedData::Reservation> SerializedCodeData::Reservations()
+std::vector<SerializedData::Reservation> SerializedCodeData::Reservations()
const {
- return Vector<const Reservation>(
- reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
- GetHeaderValue(kNumReservationsOffset));
+ uint32_t size = GetHeaderValue(kNumReservationsOffset);
+ std::vector<Reservation> reservations(size);
+ memcpy(reservations.data(), data_ + kHeaderSize,
+ size * sizeof(SerializedData::Reservation));
+ return reservations;
}
Vector<const byte> SerializedCodeData::Payload() const {
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index 7f8ff5cc8b..edc1c2bf1d 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -129,7 +129,7 @@ class SerializedCodeData : public SerializedData {
// Return ScriptData object and relinquish ownership over it to the caller.
ScriptData* GetScriptData();
- Vector<const Reservation> Reservations() const;
+ std::vector<Reservation> Reservations() const;
Vector<const byte> Payload() const;
Vector<const uint32_t> CodeStubKeys() const;
diff --git a/deps/v8/src/snapshot/default-deserializer-allocator.cc b/deps/v8/src/snapshot/default-deserializer-allocator.cc
index b352409f7e..5b34bfa540 100644
--- a/deps/v8/src/snapshot/default-deserializer-allocator.cc
+++ b/deps/v8/src/snapshot/default-deserializer-allocator.cc
@@ -121,7 +121,7 @@ HeapObject* DefaultDeserializerAllocator::GetObject(AllocationSpace space,
}
void DefaultDeserializerAllocator::DecodeReservation(
- Vector<const SerializedData::Reservation> res) {
+ std::vector<SerializedData::Reservation> res) {
DCHECK_EQ(0, reservations_[NEW_SPACE].size());
STATIC_ASSERT(NEW_SPACE == 0);
int current_space = NEW_SPACE;
diff --git a/deps/v8/src/snapshot/default-deserializer-allocator.h b/deps/v8/src/snapshot/default-deserializer-allocator.h
index 08d9f48cec..124c637fc6 100644
--- a/deps/v8/src/snapshot/default-deserializer-allocator.h
+++ b/deps/v8/src/snapshot/default-deserializer-allocator.h
@@ -44,7 +44,7 @@ class DefaultDeserializerAllocator final {
// ------- Reservation Methods -------
// Methods related to memory reservations (prior to deserialization).
- void DecodeReservation(Vector<const SerializedData::Reservation> res);
+ void DecodeReservation(std::vector<SerializedData::Reservation> res);
bool ReserveSpace();
// Atomically reserves space for the two given deserializers. Guarantees
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 5d7d551c98..4b51e89e85 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -192,14 +192,21 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
if (isolate_->external_reference_redirector()) {
call_handler_infos_.push_back(CallHandlerInfo::cast(obj));
}
- } else if (obj->IsExternalOneByteString()) {
- DCHECK(obj->map() == isolate_->heap()->native_source_string_map());
- ExternalOneByteString* string = ExternalOneByteString::cast(obj);
- DCHECK(string->is_short());
- string->set_resource(
- NativesExternalStringResource::DecodeForDeserialization(
- string->resource()));
- isolate_->heap()->RegisterExternalString(string);
+ } else if (obj->IsExternalString()) {
+ if (obj->map() == isolate_->heap()->native_source_string_map()) {
+ ExternalOneByteString* string = ExternalOneByteString::cast(obj);
+ DCHECK(string->is_short());
+ string->set_resource(
+ NativesExternalStringResource::DecodeForDeserialization(
+ string->resource()));
+ } else {
+ ExternalString* string = ExternalString::cast(obj);
+ uint32_t index = string->resource_as_uint32();
+ Address address =
+ reinterpret_cast<Address>(isolate_->api_external_references()[index]);
+ string->set_address_as_resource(address);
+ }
+ isolate_->heap()->RegisterExternalString(String::cast(obj));
} else if (obj->IsJSTypedArray()) {
JSTypedArray* typed_array = JSTypedArray::cast(obj);
CHECK(typed_array->byte_offset()->IsSmi());
@@ -234,6 +241,13 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
void* backing_store = off_heap_backing_stores_[store_index->value()];
fta->set_external_pointer(backing_store);
}
+ } else if (obj->IsBytecodeArray()) {
+ // TODO(mythria): Remove these once we store the default values for these
+ // fields in the serializer.
+ BytecodeArray* bytecode_array = BytecodeArray::cast(obj);
+ bytecode_array->set_interrupt_budget(
+ interpreter::Interpreter::kInterruptBudget);
+ bytecode_array->set_osr_loop_nesting_level(0);
}
// Check alignment.
DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
@@ -496,8 +510,7 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
case kSynchronize:
// If we get here then that indicates that you have a mismatch between
// the number of GC roots when serializing and deserializing.
- CHECK(false);
- break;
+ UNREACHABLE();
// Deserialize raw data of variable length.
case kVariableRawData: {
@@ -635,13 +648,31 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
#undef SINGLE_CASE
default:
- CHECK(false);
+ UNREACHABLE();
}
}
CHECK_EQ(limit, current);
return true;
}
+namespace {
+
+int FixupJSConstructStub(Isolate* isolate, int builtin_id) {
+ if (isolate->serializer_enabled()) return builtin_id;
+
+ if (FLAG_harmony_restrict_constructor_return &&
+ builtin_id == Builtins::kJSConstructStubGenericUnrestrictedReturn) {
+ return Builtins::kJSConstructStubGenericRestrictedReturn;
+ } else if (!FLAG_harmony_restrict_constructor_return &&
+ builtin_id == Builtins::kJSConstructStubGenericRestrictedReturn) {
+ return Builtins::kJSConstructStubGenericUnrestrictedReturn;
+ } else {
+ return builtin_id;
+ }
+}
+
+} // namespace
+
template <class AllocatorT>
template <int where, int how, int within, int space_number_if_any>
Object** Deserializer<AllocatorT>::ReadDataCase(Isolate* isolate,
@@ -692,7 +723,8 @@ Object** Deserializer<AllocatorT>::ReadDataCase(Isolate* isolate,
emit_write_barrier = isolate->heap()->InNewSpace(new_object);
} else {
DCHECK_EQ(where, kBuiltin);
- int builtin_id = MaybeReplaceWithDeserializeLazy(source_.GetInt());
+ int raw_id = MaybeReplaceWithDeserializeLazy(source_.GetInt());
+ int builtin_id = FixupJSConstructStub(isolate, raw_id);
new_object = isolate->builtins()->builtin(builtin_id);
emit_write_barrier = false;
}
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index b1ecd61f2f..33c6c4a115 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -94,7 +94,7 @@ class SnapshotWriter {
static void WriteSnapshotData(FILE* fp,
const i::Vector<const i::byte>& blob) {
for (int i = 0; i < blob.length(); i++) {
- if ((i & 0x1f) == 0x1f) fprintf(fp, "\n");
+ if ((i & 0x1F) == 0x1F) fprintf(fp, "\n");
if (i > 0) fprintf(fp, ",");
fprintf(fp, "%u", static_cast<unsigned char>(blob.at(i)));
}
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index 11b21a17b3..baac565a11 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -17,7 +17,8 @@ PartialSerializer::PartialSerializer(
: Serializer(isolate),
startup_serializer_(startup_serializer),
serialize_embedder_fields_(callback),
- can_be_rehashed_(true) {
+ can_be_rehashed_(true),
+ context_(nullptr) {
InitializeCodeAddressMap();
}
@@ -25,24 +26,23 @@ PartialSerializer::~PartialSerializer() {
OutputStatistics("PartialSerializer");
}
-void PartialSerializer::Serialize(Object** o, bool include_global_proxy) {
- DCHECK((*o)->IsNativeContext());
-
- Context* context = Context::cast(*o);
- reference_map()->AddAttachedReference(context->global_proxy());
+void PartialSerializer::Serialize(Context** o, bool include_global_proxy) {
+ context_ = *o;
+ DCHECK(context_->IsNativeContext());
+ reference_map()->AddAttachedReference(context_->global_proxy());
// The bootstrap snapshot has a code-stub context. When serializing the
// partial snapshot, it is chained into the weak context list on the isolate
// and it's next context pointer may point to the code-stub context. Clear
// it before serializing, it will get re-added to the context list
// explicitly when it's loaded.
- context->set(Context::NEXT_CONTEXT_LINK,
- isolate()->heap()->undefined_value());
- DCHECK(!context->global_object()->IsUndefined(context->GetIsolate()));
+ context_->set(Context::NEXT_CONTEXT_LINK,
+ isolate()->heap()->undefined_value());
+ DCHECK(!context_->global_object()->IsUndefined(context_->GetIsolate()));
// Reset math random cache to get fresh random numbers.
- context->set_math_random_index(Smi::kZero);
- context->set_math_random_cache(isolate()->heap()->undefined_value());
+ context_->set_math_random_index(Smi::kZero);
+ context_->set_math_random_cache(isolate()->heap()->undefined_value());
- VisitRootPointer(Root::kPartialSnapshotCache, o);
+ VisitRootPointer(Root::kPartialSnapshotCache, reinterpret_cast<Object**>(o));
SerializeDeferredObjects();
SerializeEmbedderFields();
Pad();
@@ -87,6 +87,8 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
DCHECK(!obj->IsInternalizedString());
// Function and object templates are not context specific.
DCHECK(!obj->IsTemplateInfo());
+ // We should not end up at another native context.
+ DCHECK_IMPLIES(obj != context_, !obj->IsNativeContext());
FlushSkip(skip);
diff --git a/deps/v8/src/snapshot/partial-serializer.h b/deps/v8/src/snapshot/partial-serializer.h
index b436c40cbe..3225b004cb 100644
--- a/deps/v8/src/snapshot/partial-serializer.h
+++ b/deps/v8/src/snapshot/partial-serializer.h
@@ -21,7 +21,7 @@ class PartialSerializer : public Serializer<> {
~PartialSerializer() override;
// Serialize the objects reachable from a single object pointer.
- void Serialize(Object** o, bool include_global_proxy);
+ void Serialize(Context** o, bool include_global_proxy);
bool can_be_rehashed() const { return can_be_rehashed_; }
@@ -41,6 +41,7 @@ class PartialSerializer : public Serializer<> {
// Indicates whether we only serialized hash tables that we can rehash.
// TODO(yangguo): generalize rehashing, and remove this flag.
bool can_be_rehashed_;
+ Context* context_;
DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
};
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/snapshot/serializer-common.cc
index f201342105..71436fe8fd 100644
--- a/deps/v8/src/snapshot/serializer-common.cc
+++ b/deps/v8/src/snapshot/serializer-common.cc
@@ -55,6 +55,17 @@ ExternalReferenceEncoder::~ExternalReferenceEncoder() {
#endif // DEBUG
}
+Maybe<ExternalReferenceEncoder::Value> ExternalReferenceEncoder::TryEncode(
+ Address address) {
+ Maybe<uint32_t> maybe_index = map_->Get(address);
+ if (maybe_index.IsNothing()) return Nothing<Value>();
+ Value result(maybe_index.FromJust());
+#ifdef DEBUG
+ if (result.is_from_api()) count_[result.index()]++;
+#endif // DEBUG
+ return Just<Value>(result);
+}
+
ExternalReferenceEncoder::Value ExternalReferenceEncoder::Encode(
Address address) {
Maybe<uint32_t> maybe_index = map_->Get(address);
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index 6482c350f7..7d3d66a08d 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -22,6 +22,7 @@ class ExternalReferenceEncoder {
class Value {
public:
explicit Value(uint32_t raw) : value_(raw) {}
+ Value() : value_(0) {}
static uint32_t Encode(uint32_t index, bool is_from_api) {
return Index::encode(index) | IsFromAPI::encode(is_from_api);
}
@@ -40,6 +41,7 @@ class ExternalReferenceEncoder {
~ExternalReferenceEncoder();
Value Encode(Address key);
+ Maybe<Value> TryEncode(Address key);
const char* NameOfAddress(Isolate* isolate, Address address) const;
@@ -255,6 +257,7 @@ class SerializedData {
public:
class Reservation {
public:
+ Reservation() : reservation_(0) {}
explicit Reservation(uint32_t size)
: reservation_(ChunkSizeBits::encode(size)) {}
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index fd96850890..87e4fe8fdc 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -454,13 +454,24 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeJSArrayBuffer() {
template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::SerializeExternalString() {
Heap* heap = serializer_->isolate()->heap();
+ // For external strings with known resources, we replace the resource field
+  // with the encoded external reference, which we restore upon deserialization.
+  // For native source code strings, we replace the resource field
+  // with the native source id.
+ // For the rest we serialize them to look like ordinary sequential strings.
if (object_->map() != heap->native_source_string_map()) {
- // Usually we cannot recreate resources for external strings. To work
- // around this, external strings are serialized to look like ordinary
- // sequential strings.
- // The exception are native source code strings, since we can recreate
- // their resources.
- SerializeExternalStringAsSequentialString();
+ ExternalString* string = ExternalString::cast(object_);
+ Address resource = string->resource_as_address();
+ ExternalReferenceEncoder::Value reference;
+ if (serializer_->external_reference_encoder_.TryEncode(resource).To(
+ &reference)) {
+ DCHECK(reference.is_from_api());
+ string->set_uint32_as_resource(reference.index());
+ SerializeObject();
+ string->set_address_as_resource(resource);
+ } else {
+ SerializeExternalStringAsSequentialString();
+ }
} else {
ExternalOneByteString* string = ExternalOneByteString::cast(object_);
DCHECK(string->is_short());
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index eda25fbd35..22dcb26c8c 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -252,7 +252,7 @@ class Serializer : public SerializerDeserializer {
AllocatorT allocator_;
#ifdef OBJECT_PRINT
- static const int kInstanceTypes = 256;
+ static const int kInstanceTypes = LAST_TYPE + 1;
int* instance_type_count_;
size_t* instance_type_size_;
#endif // OBJECT_PRINT
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index e7efd87bd8..2bf50cc748 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -139,7 +139,7 @@ Code* Snapshot::DeserializeHandler(Isolate* isolate,
}
if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
- isolate->logger()->LogCodeObject(code);
+ isolate->logger()->LogBytecodeHandler(bytecode, operand_scale, code);
}
return code;
@@ -358,10 +358,12 @@ SnapshotData::SnapshotData(const Serializer<AllocatorT>* serializer) {
template SnapshotData::SnapshotData(
const Serializer<DefaultSerializerAllocator>* serializer);
-Vector<const SerializedData::Reservation> SnapshotData::Reservations() const {
- return Vector<const Reservation>(
- reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
- GetHeaderValue(kNumReservationsOffset));
+std::vector<SerializedData::Reservation> SnapshotData::Reservations() const {
+ uint32_t size = GetHeaderValue(kNumReservationsOffset);
+ std::vector<SerializedData::Reservation> reservations(size);
+ memcpy(reservations.data(), data_ + kHeaderSize,
+ size * sizeof(SerializedData::Reservation));
+ return reservations;
}
Vector<const byte> SnapshotData::Payload() const {
diff --git a/deps/v8/src/snapshot/snapshot-empty.cc b/deps/v8/src/snapshot/snapshot-empty.cc
index a13f2e8870..c6ea6a2bf1 100644
--- a/deps/v8/src/snapshot/snapshot-empty.cc
+++ b/deps/v8/src/snapshot/snapshot-empty.cc
@@ -15,8 +15,8 @@ namespace internal {
// These are meant for use with snapshot-external.cc. Should this file
// be compiled with those options we just supply these dummy implementations
// below. This happens when compiling the mksnapshot utility.
-void SetNativesFromFile(StartupData* data) { CHECK(false); }
-void SetSnapshotFromFile(StartupData* data) { CHECK(false); }
+void SetNativesFromFile(StartupData* data) { UNREACHABLE(); }
+void SetSnapshotFromFile(StartupData* data) { UNREACHABLE(); }
void ReadNatives() {}
void DisposeNatives() {}
#endif // V8_USE_EXTERNAL_STARTUP_DATA
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.cc b/deps/v8/src/snapshot/snapshot-source-sink.cc
index 77b19d51a1..49e0f2298a 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.cc
+++ b/deps/v8/src/snapshot/snapshot-source-sink.cc
@@ -16,14 +16,14 @@ void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
DCHECK_LT(integer, 1 << 30);
integer <<= 2;
int bytes = 1;
- if (integer > 0xff) bytes = 2;
- if (integer > 0xffff) bytes = 3;
- if (integer > 0xffffff) bytes = 4;
+ if (integer > 0xFF) bytes = 2;
+ if (integer > 0xFFFF) bytes = 3;
+ if (integer > 0xFFFFFF) bytes = 4;
integer |= (bytes - 1);
- Put(static_cast<int>(integer & 0xff), "IntPart1");
- if (bytes > 1) Put(static_cast<int>((integer >> 8) & 0xff), "IntPart2");
- if (bytes > 2) Put(static_cast<int>((integer >> 16) & 0xff), "IntPart3");
- if (bytes > 3) Put(static_cast<int>((integer >> 24) & 0xff), "IntPart4");
+ Put(static_cast<int>(integer & 0xFF), "IntPart1");
+ if (bytes > 1) Put(static_cast<int>((integer >> 8) & 0xFF), "IntPart2");
+ if (bytes > 2) Put(static_cast<int>((integer >> 16) & 0xFF), "IntPart3");
+ if (bytes > 3) Put(static_cast<int>((integer >> 24) & 0xFF), "IntPart4");
}
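For reference, SnapshotByteSink::PutInt above stores a value below 2^30 in one to four bytes: the value is shifted left by two, the low two bits record how many bytes follow minus one, and the bytes are emitted low-order first. A standalone round-trip sketch of that scheme (PutInt/GetInt here are free functions over a byte vector, not the real sink/source classes):

#include <cassert>
#include <cstdint>
#include <vector>

// Encode as in SnapshotByteSink::PutInt: shift left 2, stash (bytes - 1)
// in the low two bits, then emit the low `bytes` bytes.
std::vector<uint8_t> PutInt(uintptr_t value) {
  assert(value < (1u << 30));
  value <<= 2;
  int bytes = 1;
  if (value > 0xFF) bytes = 2;
  if (value > 0xFFFF) bytes = 3;
  if (value > 0xFFFFFF) bytes = 4;
  value |= (bytes - 1);
  std::vector<uint8_t> out;
  for (int i = 0; i < bytes; i++) out.push_back((value >> (8 * i)) & 0xFF);
  return out;
}

uintptr_t GetInt(const std::vector<uint8_t>& data) {
  int bytes = (data[0] & 3) + 1;
  uintptr_t value = 0;
  for (int i = 0; i < bytes; i++) value |= uintptr_t(data[i]) << (8 * i);
  return value >> 2;  // drop the length tag
}

int main() {
  const uintptr_t cases[] = {0, 63, 64, 1u << 20, (1u << 30) - 1};
  for (uintptr_t v : cases) {
    assert(GetInt(PutInt(v)) == v);
  }
}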
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index 2ffe5b6086..8f37e00c4a 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -31,7 +31,7 @@ class SnapshotData : public SerializedData {
: SerializedData(const_cast<byte*>(snapshot.begin()), snapshot.length()) {
}
- Vector<const Reservation> Reservations() const;
+ std::vector<Reservation> Reservations() const;
virtual Vector<const byte> Payload() const;
Vector<const byte> RawData() const {
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index 91432e185a..e6f853fe0e 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -37,7 +37,7 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
isolate->heap()->IterateSmiRoots(this);
isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
isolate->heap()->RepairFreeListsAfterDeserialization();
- isolate->heap()->IterateWeakRoots(this, VISIT_ALL);
+ isolate->heap()->IterateWeakRoots(this, VISIT_FOR_SERIALIZATION);
DeserializeDeferredObjects();
RestoreExternalReferenceRedirectors(accessor_infos());
RestoreExternalReferenceRedirectors(call_handler_infos());
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 8b4a79b8b1..5ae6e33b87 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -95,7 +95,7 @@ void StartupSerializer::SerializeWeakReferencesAndDeferred() {
// one entry with 'undefined' to terminate the partial snapshot cache.
Object* undefined = isolate()->heap()->undefined_value();
VisitRootPointer(Root::kPartialSnapshotCache, &undefined);
- isolate()->heap()->IterateWeakRoots(this, VISIT_ALL);
+ isolate()->heap()->IterateWeakRoots(this, VISIT_FOR_SERIALIZATION);
SerializeDeferredObjects();
Pad();
}
@@ -122,8 +122,7 @@ void StartupSerializer::SerializeStrongReferences() {
CHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
// No active or weak handles.
CHECK(isolate->handle_scope_implementer()->blocks()->empty());
- CHECK_EQ(0, isolate->global_handles()->global_handles_count());
- CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
+
// Visit smi roots.
// Clear the stack limits to make the snapshot reproducible.
// Reset it again afterwards.
@@ -131,8 +130,7 @@ void StartupSerializer::SerializeStrongReferences() {
isolate->heap()->IterateSmiRoots(this);
isolate->heap()->SetStackLimits();
// First visit immortal immovables to make sure they end up in the first page.
- isolate->heap()->IterateStrongRoots(this,
- VISIT_ONLY_STRONG_FOR_SERIALIZATION);
+ isolate->heap()->IterateStrongRoots(this, VISIT_FOR_SERIALIZATION);
}
void StartupSerializer::VisitRootPointers(Root root, Object** start,
@@ -185,5 +183,36 @@ bool StartupSerializer::MustBeDeferred(HeapObject* object) {
return !object->IsMap();
}
+SerializedHandleChecker::SerializedHandleChecker(
+ Isolate* isolate, std::vector<Context*>* contexts)
+ : isolate_(isolate) {
+ AddToSet(isolate->heap()->serialized_objects());
+ for (auto const& context : *contexts) {
+ AddToSet(context->serialized_objects());
+ }
+}
+
+void SerializedHandleChecker::AddToSet(FixedArray* serialized) {
+ int length = serialized->length();
+ for (int i = 0; i < length; i++) serialized_.insert(serialized->get(i));
+}
+
+void SerializedHandleChecker::VisitRootPointers(Root root, Object** start,
+ Object** end) {
+ for (Object** p = start; p < end; p++) {
+ if (serialized_.find(*p) != serialized_.end()) continue;
+ PrintF("%s handle not serialized: ",
+ root == Root::kGlobalHandles ? "global" : "eternal");
+ (*p)->Print();
+ ok_ = false;
+ }
+}
+
+bool SerializedHandleChecker::CheckGlobalAndEternalHandles() {
+ isolate_->global_handles()->IterateAllRoots(this);
+ isolate_->eternal_handles()->IterateAllRoots(this);
+ return ok_;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index 69985388e9..ae2a9f49df 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -83,6 +83,20 @@ class StartupSerializer : public Serializer<> {
DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
};
+class SerializedHandleChecker : public RootVisitor {
+ public:
+ SerializedHandleChecker(Isolate* isolate, std::vector<Context*>* contexts);
+ virtual void VisitRootPointers(Root root, Object** start, Object** end);
+ bool CheckGlobalAndEternalHandles();
+
+ private:
+ void AddToSet(FixedArray* serialized);
+
+ Isolate* isolate_;
+ std::unordered_set<Object*> serialized_;
+ bool ok_ = true;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index be508f4f45..7693a229bf 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -129,7 +129,7 @@ void StringStream::Add(Vector<const char> format, Vector<FmtElm> elms) {
int value = current.data_.u_int_;
if (0x20 <= value && value <= 0x7F) {
Put(value);
- } else if (value <= 0xff) {
+ } else if (value <= 0xFF) {
Add("\\x%02x", value);
} else {
Add("\\u%04x", value);
diff --git a/deps/v8/src/strtod.cc b/deps/v8/src/strtod.cc
index 4bdd5378fa..8d42b4c202 100644
--- a/deps/v8/src/strtod.cc
+++ b/deps/v8/src/strtod.cc
@@ -35,7 +35,7 @@ static const int kMinDecimalPower = -324;
// 2^64 = 18446744073709551616
static const uint64_t kMaxUint64 = V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF);
-
+// clang-format off
static const double exact_powers_of_ten[] = {
1.0, // 10^0
10.0,
@@ -59,9 +59,10 @@ static const double exact_powers_of_ten[] = {
10000000000000000000.0,
100000000000000000000.0, // 10^20
1000000000000000000000.0,
- // 10^22 = 0x21e19e0c9bab2400000 = 0x878678326eac9 * 2^22
+ // 10^22 = 0x21E19E0C9BAB2400000 = 0x878678326EAC9 * 2^22
10000000000000000000000.0
};
+// clang-format on
static const int kExactPowersOfTenSize = arraysize(exact_powers_of_ten);
// Maximum number of significant digits in the decimal representation.
@@ -162,8 +163,11 @@ static bool DoubleStrtod(Vector<const char> trimmed,
// therefore accurate.
// Note that the ARM and MIPS simulators are compiled for 32bits. They
// therefore exhibit the same problem.
+ USE(exact_powers_of_ten);
+ USE(kMaxExactDoubleIntegerDecimalDigits);
+ USE(kExactPowersOfTenSize);
return false;
-#endif
+#else
if (trimmed.length() <= kMaxExactDoubleIntegerDecimalDigits) {
int read_digits;
// The trimmed input fits into a double.
@@ -201,6 +205,7 @@ static bool DoubleStrtod(Vector<const char> trimmed,
}
}
return false;
+#endif
}
@@ -213,13 +218,20 @@ static DiyFp AdjustmentPowerOfTen(int exponent) {
// distance.
DCHECK_EQ(PowersOfTenCache::kDecimalExponentDistance, 8);
switch (exponent) {
- case 1: return DiyFp(V8_2PART_UINT64_C(0xa0000000, 00000000), -60);
- case 2: return DiyFp(V8_2PART_UINT64_C(0xc8000000, 00000000), -57);
- case 3: return DiyFp(V8_2PART_UINT64_C(0xfa000000, 00000000), -54);
- case 4: return DiyFp(V8_2PART_UINT64_C(0x9c400000, 00000000), -50);
- case 5: return DiyFp(V8_2PART_UINT64_C(0xc3500000, 00000000), -47);
- case 6: return DiyFp(V8_2PART_UINT64_C(0xf4240000, 00000000), -44);
- case 7: return DiyFp(V8_2PART_UINT64_C(0x98968000, 00000000), -40);
+ case 1:
+ return DiyFp(V8_2PART_UINT64_C(0xA0000000, 00000000), -60);
+ case 2:
+ return DiyFp(V8_2PART_UINT64_C(0xC8000000, 00000000), -57);
+ case 3:
+ return DiyFp(V8_2PART_UINT64_C(0xFA000000, 00000000), -54);
+ case 4:
+ return DiyFp(V8_2PART_UINT64_C(0x9C400000, 00000000), -50);
+ case 5:
+ return DiyFp(V8_2PART_UINT64_C(0xC3500000, 00000000), -47);
+ case 6:
+ return DiyFp(V8_2PART_UINT64_C(0xF4240000, 00000000), -44);
+ case 7:
+ return DiyFp(V8_2PART_UINT64_C(0x98968000, 00000000), -40);
default:
UNREACHABLE();
}
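The table in AdjustmentPowerOfTen stores exact powers of ten as DiyFp values, i.e. a 64-bit significand paired with a binary exponent. As a quick standalone sanity check (plain C++, not V8 code), the case-1 entry decodes back to 10:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // 0xA000000000000000 == 10 * 2^60, so pairing it with exponent -60
      // represents exactly 10.0.
      uint64_t significand = 0xA000000000000000ULL;
      int exponent = -60;
      double value = std::ldexp(static_cast<double>(significand), exponent);
      std::printf("%.17g\n", value);  // prints 10
    }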
diff --git a/deps/v8/src/third_party/utf8-decoder/LICENSE b/deps/v8/src/third_party/utf8-decoder/LICENSE
new file mode 100644
index 0000000000..b59bef2fb6
--- /dev/null
+++ b/deps/v8/src/third_party/utf8-decoder/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2008-2009 Bjoern Hoehrmann <bjoern@hoehrmann.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/deps/v8/src/third_party/utf8-decoder/README.v8 b/deps/v8/src/third_party/utf8-decoder/README.v8
new file mode 100644
index 0000000000..e1e13ce53f
--- /dev/null
+++ b/deps/v8/src/third_party/utf8-decoder/README.v8
@@ -0,0 +1,18 @@
+Name: DFA UTF-8 Decoder
+Short Name: utf8-decoder
+URL: http://bjoern.hoehrmann.de/utf-8/decoder/dfa/
+Version: 0
+License: MIT
+License File: NOT_SHIPPED
+Security Critical: no
+
+Description:
+Decodes UTF-8 bytes using a fast and simple deterministic finite automaton.
+
+Local modifications:
+- Rejection state has been mapped to row 0 (instead of row 1) of the DFA,
+ saving some 50 bytes and making the table easier to reason about.
+- The transitions have been remapped to represent both a state transition and a
+ bit mask for the incoming byte.
+- The caller must now zero out the code point buffer after successful or
+ unsuccessful state transitions.
diff --git a/deps/v8/src/third_party/utf8-decoder/utf8-decoder.h b/deps/v8/src/third_party/utf8-decoder/utf8-decoder.h
new file mode 100644
index 0000000000..5668e5ad9e
--- /dev/null
+++ b/deps/v8/src/third_party/utf8-decoder/utf8-decoder.h
@@ -0,0 +1,78 @@
+// See http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ for details.
+// The remapped transition table is justified at
+// https://docs.google.com/spreadsheets/d/1AZcQwuEL93HmNCljJWUwFMGqf7JAQ0puawZaUgP0E14
+
+#include <stdint.h>
+
+#ifndef __UTF8_DFA_DECODER_H
+#define __UTF8_DFA_DECODER_H
+
+namespace Utf8DfaDecoder {
+
+enum State : uint8_t {
+ kReject = 0,
+ kAccept = 12,
+ kTwoByte = 24,
+ kThreeByte = 36,
+ kThreeByteLowMid = 48,
+ kFourByte = 60,
+ kFourByteLow = 72,
+ kThreeByteHigh = 84,
+ kFourByteMidHigh = 96,
+};
+
+static inline void Decode(uint8_t byte, State* state, uint32_t* buffer) {
+ // This first table maps each input byte to a transition.
+ static constexpr uint8_t transitions[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 00-0F
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 10-1F
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20-2F
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 30-3F
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 40-4F
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 50-5F
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 60-6F
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 70-7F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 80-8F
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 90-9F
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // A0-AF
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // B0-BF
+ 9, 9, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, // C0-CF
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, // D0-DF
+ 10, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 5, 5, // E0-EF
+ 11, 7, 7, 7, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, // F0-FF
+ };
+
+ // This second table maps a state and a transition to the next state.
+ // 00-7F
+ // | 80-8F
+ // | | 90-9F
+ // | | | A0-BF
+ // | | | | C2-DF
+ // | | | | | E1-EC, EE, EF
+ // | | | | | | ED
+ // | | | | | | | F1-F3
+ // | | | | | | | | F4
+ // | | | | | | | | | C0, C1, F5-FF
+ // | | | | | | | | | | E0
+ // | | | | | | | | | | | F0
+ static constexpr uint8_t states[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // REJECT = 0
+ 12, 0, 0, 0, 24, 36, 48, 60, 72, 0, 84, 96, // ACCEPT = 12
+ 0, 12, 12, 12, 0, 0, 0, 0, 0, 0, 0, 0, // 2-byte = 24
+ 0, 24, 24, 24, 0, 0, 0, 0, 0, 0, 0, 0, // 3-byte = 36
+ 0, 24, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 3-byte low/mid = 48
+ 0, 36, 36, 36, 0, 0, 0, 0, 0, 0, 0, 0, // 4-byte = 60
+ 0, 36, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 4-byte low = 72
+ 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, // 3-byte high = 84
+ 0, 0, 36, 36, 0, 0, 0, 0, 0, 0, 0, 0, // 4-byte mid/high = 96
+ };
+
+ DCHECK_NE(*state, State::kReject);
+ uint8_t type = transitions[byte];
+ *state = static_cast<State>(states[*state + type]);
+ *buffer = (*buffer << 6) | (byte & (0x7F >> (type >> 1)));
+}
+
+} // namespace Utf8DfaDecoder
+
+#endif /* __UTF8_DFA_DECODER_H */
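To make the two tables above concrete, here is a hypothetical standalone walk through the DFA decoding the three-byte sequence E2 82 AC (U+20AC, the euro sign). It assumes the header is compiled inside the V8 tree, where DCHECK_NE is available; note how the expression 0x7F >> (type >> 1) doubles as the payload mask for each byte class.

    #include <cstdint>
    #include <cstdio>
    #include "src/third_party/utf8-decoder/utf8-decoder.h"

    int main() {
      using Utf8DfaDecoder::State;
      State state = State::kAccept;
      uint32_t buffer = 0;
      const uint8_t bytes[] = {0xE2, 0x82, 0xAC};
      for (uint8_t b : bytes) {
        // Each call advances the state machine and shifts six more payload
        // bits into the buffer.
        Utf8DfaDecoder::Decode(b, &state, &buffer);
      }
      // Ends in kAccept with buffer == 0x20AC; kReject would mean an invalid
      // or overlong sequence.
      std::printf("state=%d code point=U+%04X\n", state, buffer);
      return state == State::kAccept ? 0 : 1;
    }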
diff --git a/deps/v8/src/tracing/traced-value.cc b/deps/v8/src/tracing/traced-value.cc
index 9b2a45c991..de9382e65b 100644
--- a/deps/v8/src/tracing/traced-value.cc
+++ b/deps/v8/src/tracing/traced-value.cc
@@ -43,7 +43,7 @@ void EscapeAndAppendString(const char* value, std::string* result) {
*result += "\\\\";
break;
default:
- if (c < '\040') {
+ if (c < '\x20') {
base::OS::SNPrintF(
number_buffer, arraysize(number_buffer), "\\u%04X",
static_cast<unsigned>(static_cast<unsigned char>(c)));
diff --git a/deps/v8/src/tracing/tracing-category-observer.cc b/deps/v8/src/tracing/tracing-category-observer.cc
index 3e286620dc..28c107d88f 100644
--- a/deps/v8/src/tracing/tracing-category-observer.cc
+++ b/deps/v8/src/tracing/tracing-category-observer.cc
@@ -4,6 +4,7 @@
#include "src/tracing/tracing-category-observer.h"
+#include "src/base/atomic-utils.h"
#include "src/flags.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"
@@ -37,12 +38,16 @@ void TracingCategoryObserver::OnTraceEnabled() {
TRACE_EVENT_CATEGORY_GROUP_ENABLED(
TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"), &enabled);
if (enabled) {
- v8::internal::FLAG_runtime_stats |= ENABLED_BY_TRACING;
+ base::AsAtomic32::Relaxed_Store(
+ &v8::internal::FLAG_runtime_stats,
+ (v8::internal::FLAG_runtime_stats | ENABLED_BY_TRACING));
}
TRACE_EVENT_CATEGORY_GROUP_ENABLED(
TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats_sampling"), &enabled);
if (enabled) {
- v8::internal::FLAG_runtime_stats |= ENABLED_BY_SAMPLING;
+ base::AsAtomic32::Relaxed_Store(
+ &v8::internal::FLAG_runtime_stats,
+ v8::internal::FLAG_runtime_stats | ENABLED_BY_SAMPLING);
}
TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
&enabled);
@@ -57,8 +62,10 @@ void TracingCategoryObserver::OnTraceEnabled() {
}
void TracingCategoryObserver::OnTraceDisabled() {
- v8::internal::FLAG_runtime_stats &=
- ~(ENABLED_BY_TRACING | ENABLED_BY_SAMPLING);
+ base::AsAtomic32::Relaxed_Store(
+ &v8::internal::FLAG_runtime_stats,
+ v8::internal::FLAG_runtime_stats &
+ ~(ENABLED_BY_TRACING | ENABLED_BY_SAMPLING));
v8::internal::FLAG_gc_stats &= ~ENABLED_BY_TRACING;
v8::internal::FLAG_ic_stats &= ~ENABLED_BY_TRACING;
}
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 8e087b2e67..1d6f9a05be 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -17,7 +17,7 @@ void TransitionsAccessor::Initialize() {
encoding_ = kUninitialized;
} else if (HeapObject::cast(raw_transitions_)->IsWeakCell()) {
encoding_ = kWeakCell;
- } else if (StoreHandler::IsHandler(raw_transitions_)) {
+ } else if (HeapObject::cast(raw_transitions_)->IsStoreHandler()) {
encoding_ = kHandler;
} else if (HeapObject::cast(raw_transitions_)->IsTransitionArray()) {
encoding_ = kFullTransitionArray;
@@ -250,7 +250,7 @@ Object* TransitionsAccessor::SearchHandler(Name* name,
int transition = transitions()->Search(kData, name, NONE);
if (transition == kNotFound) return nullptr;
Object* raw_handler = transitions()->GetRawTarget(transition);
- if (StoreHandler::IsHandler(raw_handler)) {
+ if (raw_handler->IsStoreHandler()) {
return StoreHandler::ValidHandlerOrNull(raw_handler, name,
out_transition);
}
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index 612cf51b45..1e02eeb34c 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -66,7 +66,7 @@ void ReleaseHandlerData(int index);
#define THREAD_LOCAL __thread
#endif
-inline bool UseTrapHandler() {
+inline bool IsTrapHandlerEnabled() {
return FLAG_wasm_trap_handler && V8_TRAP_HANDLER_SUPPORTED;
}
@@ -75,14 +75,14 @@ extern THREAD_LOCAL int g_thread_in_wasm_code;
inline bool IsThreadInWasm() { return g_thread_in_wasm_code; }
inline void SetThreadInWasm() {
- if (UseTrapHandler()) {
+ if (IsTrapHandlerEnabled()) {
DCHECK(!IsThreadInWasm());
g_thread_in_wasm_code = true;
}
}
inline void ClearThreadInWasm() {
- if (UseTrapHandler()) {
+ if (IsTrapHandlerEnabled()) {
DCHECK(IsThreadInWasm());
g_thread_in_wasm_code = false;
}
diff --git a/deps/v8/src/type-hints.cc b/deps/v8/src/type-hints.cc
index 11ce1561f9..d74a913901 100644
--- a/deps/v8/src/type-hints.cc
+++ b/deps/v8/src/type-hints.cc
@@ -23,6 +23,8 @@ std::ostream& operator<<(std::ostream& os, BinaryOperationHint hint) {
return os << "NumberOrOddball";
case BinaryOperationHint::kString:
return os << "String";
+ case BinaryOperationHint::kBigInt:
+ return os << "BigInt";
case BinaryOperationHint::kAny:
return os << "Any";
}
@@ -45,6 +47,8 @@ std::ostream& operator<<(std::ostream& os, CompareOperationHint hint) {
return os << "String";
case CompareOperationHint::kSymbol:
return os << "Symbol";
+ case CompareOperationHint::kBigInt:
+ return os << "BigInt";
case CompareOperationHint::kReceiver:
return os << "Receiver";
case CompareOperationHint::kAny:
diff --git a/deps/v8/src/type-hints.h b/deps/v8/src/type-hints.h
index 66102eae9a..e9ac639723 100644
--- a/deps/v8/src/type-hints.h
+++ b/deps/v8/src/type-hints.h
@@ -20,6 +20,7 @@ enum class BinaryOperationHint : uint8_t {
kNumber,
kNumberOrOddball,
kString,
+ kBigInt,
kAny
};
@@ -38,6 +39,7 @@ enum class CompareOperationHint : uint8_t {
kInternalizedString,
kString,
kSymbol,
+ kBigInt,
kReceiver,
kAny
};
diff --git a/deps/v8/src/unicode-inl.h b/deps/v8/src/unicode-inl.h
index ebebfaa1bd..7c0386ce52 100644
--- a/deps/v8/src/unicode-inl.h
+++ b/deps/v8/src/unicode-inl.h
@@ -113,8 +113,8 @@ unsigned Utf8::Encode(char* str,
uchar Utf8::ValueOf(const byte* bytes, size_t length, size_t* cursor) {
if (length <= 0) return kBadChar;
byte first = bytes[0];
- // Characters between 0000 and 0007F are encoded as a single character
- if (first <= kMaxOneByteChar) {
+ // Characters between 0000 and 007F are encoded as a single character
+ if (V8_LIKELY(first <= kMaxOneByteChar)) {
*cursor += 1;
return first;
}
diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc
index 22e5ca606e..4d7896ec37 100644
--- a/deps/v8/src/unicode.cc
+++ b/deps/v8/src/unicode.cc
@@ -21,7 +21,7 @@ static const uchar kSentinel = static_cast<uchar>(-1);
/**
* \file
- * Implementations of functions for working with unicode.
+ * Implementations of functions for working with Unicode.
*/
typedef signed short int16_t; // NOLINT
@@ -46,7 +46,7 @@ static inline bool IsStart(int32_t entry) {
#ifndef V8_INTL_SUPPORT
/**
- * Look up a character in the unicode table using a mix of binary and
+ * Look up a character in the Unicode table using a mix of binary and
* interpolation search. For a uniformly distributed array
* interpolation search beats binary search by a wide margin. However,
* in this case interpolation search degenerates because of some very
@@ -193,306 +193,91 @@ static int LookupMapping(const int32_t* table,
}
}
-static inline uint8_t NonASCIISequenceLength(byte first) {
- // clang-format off
- static const uint8_t lengths[256] = {
- // The first 128 entries correspond to ASCII characters.
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* OO - Of */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 10 - 1f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 20 - 2f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 30 - 3f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 40 - 4f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 50 - 5f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 60 - 6f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 70 - 7f */
- // The following 64 entries correspond to continuation bytes.
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 80 - 8f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 90 - 9f */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* a0 - af */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* b0 - bf */
- // The next are two invalid overlong encodings and 30 two-byte sequences.
- 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, /* c0-c1 + c2-cf */
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, /* d0-df */
- // 16 three-byte sequences.
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, /* e0-ef */
- // 5 four-byte sequences, followed by sequences that could only encode
- // code points outside of the unicode range.
- 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; /* f0-f4 + f5-ff */
- // clang-format on
- return lengths[first];
-}
-
-
-static inline bool IsContinuationCharacter(byte chr) {
- return chr >= 0x80 && chr <= 0xBF;
-}
-
// This method decodes an UTF-8 value according to RFC 3629 and
// https://encoding.spec.whatwg.org/#utf-8-decoder .
uchar Utf8::CalculateValue(const byte* str, size_t max_length, size_t* cursor) {
+ DCHECK_GT(max_length, 0);
DCHECK_GT(str[0], kMaxOneByteChar);
- size_t length = NonASCIISequenceLength(str[0]);
-
- // Check continuation characters.
- size_t max_count = std::min(length, max_length);
- size_t count = 1;
- while (count < max_count && IsContinuationCharacter(str[count])) {
- count++;
- }
+ State state = State::kAccept;
+ Utf8IncrementalBuffer buffer = 0;
+ uchar t;
- if (length >= 3 && count < 2) {
- // Not enough continuation bytes to check overlong sequences.
- *cursor += 1;
- return kBadChar;
- }
+ size_t i = 0;
+ do {
+ t = ValueOfIncremental(str[i], &i, &state, &buffer);
+ } while (i < max_length && t == kIncomplete);
- // Check overly long sequences & other conditions.
- if (length == 3) {
- if (str[0] == 0xE0 && (str[1] < 0xA0 || str[1] > 0xBF)) {
- // Overlong three-byte sequence? The first byte generates a kBadChar.
- *cursor += 1;
- return kBadChar;
- } else if (str[0] == 0xED && (str[1] < 0x80 || str[1] > 0x9F)) {
- // High and low surrogate halves? The first byte generates a kBadChar.
- *cursor += 1;
- return kBadChar;
- }
- } else if (length == 4) {
- if (str[0] == 0xF0 && (str[1] < 0x90 || str[1] > 0xBF)) {
- // Overlong four-byte sequence. The first byte generates a kBadChar.
- *cursor += 1;
- return kBadChar;
- } else if (str[0] == 0xF4 && (str[1] < 0x80 || str[1] > 0x8F)) {
- // Code points outside of the unicode range. The first byte generates a
- // kBadChar.
- *cursor += 1;
- return kBadChar;
- }
- }
-
- *cursor += count;
-
- if (count != length) {
- // Not enough continuation characters.
- return kBadChar;
- }
-
- // All errors have been handled, so we only have to assemble the result.
- switch (length) {
- case 2:
- return ((str[0] << 6) + str[1]) - 0x00003080;
- case 3:
- return ((str[0] << 12) + (str[1] << 6) + str[2]) - 0x000E2080;
- case 4:
- return ((str[0] << 18) + (str[1] << 12) + (str[2] << 6) + str[3]) -
- 0x03C82080;
- }
-
- UNREACHABLE();
+ *cursor += i;
+ return (state == State::kAccept) ? t : kBadChar;
}
-/*
-Overlong sequence detection: Since Blink's TextCodecUTF8 rejects multi-byte
-characters which could be expressed with less bytes, we must too.
-
-Each continuation byte (10xxxxxx) carries 6 bits of payload. The lead bytes of
-1, 2, 3 and 4-byte characters are 0xxxxxxx, 110xxxxx, 1110xxxx and 11110xxx, and
-carry 7, 5, 4, and 3 bits of payload, respectively.
-
-Thus, a two-byte character can contain 11 bits of payload, a three-byte
-character 16, and a four-byte character 21.
-
-If we encounter a two-byte character which contains 7 bits or less, a three-byte
-character which contains 11 bits or less, or a four-byte character which
-contains 16 bits or less, we reject the character and generate a kBadChar for
-each of the bytes. This is because Blink handles overlong sequences by rejecting
-the first byte of the character (returning kBadChar); thus the rest are lonely
-continuation bytes and generate a kBadChar each.
-*/
-
-uchar Utf8::ValueOfIncremental(byte next, Utf8IncrementalBuffer* buffer) {
+// Decodes UTF-8 bytes incrementally as they stream in. This **must** be
+// followed by a call to ValueOfIncrementalFinish when the stream is complete,
+// to ensure incomplete sequences are handled.
+uchar Utf8::ValueOfIncremental(byte next, size_t* cursor, State* state,
+ Utf8IncrementalBuffer* buffer) {
DCHECK_NOT_NULL(buffer);
+ State old_state = *state;
+ *cursor += 1;
- // The common case: 1-byte Utf8 (and no incomplete char in the buffer)
- if (V8_LIKELY(next <= kMaxOneByteChar && *buffer == 0)) {
+ if (V8_LIKELY(next <= kMaxOneByteChar && old_state == State::kAccept)) {
+ DCHECK_EQ(0u, *buffer);
return static_cast<uchar>(next);
}
- if (*buffer == 0) {
- // We're at the start of a new character.
- uint32_t kind = NonASCIISequenceLength(next);
- CHECK_LE(kind, 4);
- if (kind >= 2) {
- // Start of 2..4 byte character, and no buffer.
-
- // The mask for the lower bits depends on the kind, and is
- // 0x1F, 0x0F, 0x07 for kinds 2, 3, 4 respectively. We can get that
- // with one shift.
- uint8_t mask = 0x7f >> kind;
+ // So we're at the lead byte of a 2/3/4 sequence, or we're at a continuation
+ // char in that sequence.
+ Utf8DfaDecoder::Decode(next, state, buffer);
- // Store the kind in the top nibble, and kind - 1 (i.e., remaining bytes)
- // in 2nd nibble, and the value in the bottom three. The 2nd nibble is
- // intended as a counter about how many bytes are still needed.
- uint32_t character_info = kind << 28 | (kind - 1) << 24;
- DCHECK_EQ(character_info & mask, 0);
- *buffer = character_info | (next & mask);
- return kIncomplete;
- } else {
- // No buffer, and not the start of a 1-byte char (handled at the
- // beginning), and not the start of a 2..4 byte char (or the start of an
- // overlong / invalid sequence)? Bad char.
+ switch (*state) {
+ case State::kAccept: {
+ uchar t = *buffer;
*buffer = 0;
- return kBadChar;
- }
- } else if (*buffer <= 0xff) {
- // We have one unprocessed byte left (from the last else case in this if
- // statement).
- uchar previous = *buffer;
- *buffer = 0;
- uchar t = ValueOfIncremental(previous, buffer);
- if (t == kIncomplete) {
- // If we have an incomplete character, process both the previous and the
- // next byte at once.
- return ValueOfIncremental(next, buffer);
- } else {
- // Otherwise, process the previous byte and save the next byte for next
- // time.
- DCHECK_EQ(0u, *buffer);
- *buffer = next;
return t;
}
- } else if (IsContinuationCharacter(next)) {
- // We're inside of a character, as described by buffer.
-
- // How many bytes (excluding this one) do we still expect?
- uint8_t bytes_expected = *buffer >> 28;
- uint8_t bytes_left = (*buffer >> 24) & 0x0f;
- // Two-byte overlong sequence detection is handled by
- // NonASCIISequenceLength, so we don't need to check anything here.
- if (bytes_expected == 3 && bytes_left == 2) {
- // Check that there are at least 12 bytes of payload.
- uint8_t lead_payload = *buffer & (0x7f >> bytes_expected);
- DCHECK_LE(lead_payload, 0xf);
- if (lead_payload == 0 && next < 0xa0) {
- // 0xa0 = 0b10100000 (payload: 100000). Overlong sequence: 0 bits from
- // the first byte, at most 5 from the second byte, and at most 6 from
- // the third -> in total at most 11.
-
- *buffer = next;
- return kBadChar;
- } else if (lead_payload == 0xd && next > 0x9f) {
- // The resulting code point would be on a range which is reserved for
- // UTF-16 surrogate halves.
- *buffer = next;
- return kBadChar;
- }
- } else if (bytes_expected == 4 && bytes_left == 3) {
- // Check that there are at least 17 bytes of payload.
- uint8_t lead_payload = *buffer & (0x7f >> bytes_expected);
+ case State::kReject:
+ *state = State::kAccept;
+ *buffer = 0;
- // If the lead byte was bigger than 0xf4 (payload: 4), it's not a start of
- // any valid character, and this is detected by NonASCIISequenceLength.
- DCHECK_LE(lead_payload, 0x4);
- if (lead_payload == 0 && next < 0x90) {
- // 0x90 = 10010000 (payload 10000). Overlong sequence: 0 bits from the
- // first byte, at most 4 from the second byte, at most 12 from the third
- // and fourth bytes -> in total at most 16.
- *buffer = next;
- return kBadChar;
- } else if (lead_payload == 4 && next > 0x8f) {
- // Invalid code point; value greater than 0b100001111000000000000
- // (0x10ffff).
- *buffer = next;
- return kBadChar;
+ // If we hit a bad byte, we need to determine if we were trying to start
+ // a sequence or continue one. If we were trying to start a sequence,
+ // that means it's just an invalid lead byte and we need to continue to
+ // the next (which we already did above). If we were already in a
+ // sequence, we need to reprocess this same byte after resetting to the
+ // initial state.
+ if (old_state != State::kAccept) {
+ // We were trying to continue a sequence, so let's reprocess this byte
+ // next time.
+ *cursor -= 1;
}
- }
+ return kBadChar;
- bytes_left--;
- // Update the value.
- uint32_t value = ((*buffer & 0xffffff) << 6) | (next & 0x3F);
- if (bytes_left) {
- *buffer = (bytes_expected << 28 | bytes_left << 24 | value);
+ default:
return kIncomplete;
- } else {
-#ifdef DEBUG
- // Check that overlong sequences were already detected.
- bool sequence_was_too_long = (bytes_expected == 2 && value < 0x80) ||
- (bytes_expected == 3 && value < 0x800) ||
- (bytes_expected == 4 && value < 0x8000);
- DCHECK(!sequence_was_too_long);
-#endif
- *buffer = 0;
- return value;
- }
- } else {
- // Within a character, but not a continuation character? Then the
- // previous char was a bad char. But we need to save the current
- // one.
- *buffer = next;
- return kBadChar;
}
}
-uchar Utf8::ValueOfIncrementalFinish(Utf8IncrementalBuffer* buffer) {
- DCHECK_NOT_NULL(buffer);
- if (*buffer == 0) {
+// Finishes the incremental decoding, ensuring that any unfinished sequence
+// is replaced by the replacement character.
+uchar Utf8::ValueOfIncrementalFinish(State* state) {
+ if (*state == State::kAccept) {
return kBufferEmpty;
} else {
- // Process left-over chars. An incomplete char at the end maps to kBadChar.
- uchar t = ValueOfIncremental(0, buffer);
- return (t == kIncomplete) ? kBadChar : t;
+ DCHECK_GT(*state, State::kAccept);
+ *state = State::kAccept;
+ return kBadChar;
}
}
bool Utf8::ValidateEncoding(const byte* bytes, size_t length) {
- const byte* cursor = bytes;
- const byte* end = bytes + length;
-
- while (cursor < end) {
- // Skip over single-byte values.
- if (*cursor <= kMaxOneByteChar) {
- ++cursor;
- continue;
- }
-
- // Get the length the the character.
- size_t seq_length = NonASCIISequenceLength(*cursor);
- // For some invalid characters NonASCIISequenceLength returns 0.
- if (seq_length == 0) return false;
-
- const byte* char_end = cursor + seq_length;
-
- // Return false if we do not have enough bytes for the character.
- if (char_end > end) return false;
-
- // Check if the bytes of the character are continuation bytes.
- for (const byte* i = cursor + 1; i < char_end; ++i) {
- if (!IsContinuationCharacter(*i)) return false;
- }
-
- // Check overly long sequences & other conditions.
- if (seq_length == 3) {
- if (cursor[0] == 0xE0 && (cursor[1] < 0xA0 || cursor[1] > 0xBF)) {
- // Overlong three-byte sequence?
- return false;
- } else if (cursor[0] == 0xED && (cursor[1] < 0x80 || cursor[1] > 0x9F)) {
- // High and low surrogate halves?
- return false;
- }
- } else if (seq_length == 4) {
- if (cursor[0] == 0xF0 && (cursor[1] < 0x90 || cursor[1] > 0xBF)) {
- // Overlong four-byte sequence.
- return false;
- } else if (cursor[0] == 0xF4 && (cursor[1] < 0x80 || cursor[1] > 0x8F)) {
- // Code points outside of the unicode range.
- return false;
- }
- }
- cursor = char_end;
+ State state = State::kAccept;
+ Utf8IncrementalBuffer throw_away = 0;
+ for (size_t i = 0; i < length && state != State::kReject; i++) {
+ Utf8DfaDecoder::Decode(bytes[i], &state, &throw_away);
}
- return true;
+ return state == State::kAccept;
}
// Uppercase: point.category == 'Lu'
@@ -3333,7 +3118,7 @@ int CanonicalizationRange::Convert(uchar c,
}
-const uchar UnicodeData::kMaxCodePoint = 65533;
+const uchar UnicodeData::kMaxCodePoint = 0xFFFD;
int UnicodeData::GetByteCount() {
#ifndef V8_INTL_SUPPORT // NOLINT
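The reworked ValueOfIncremental/ValueOfIncrementalFinish pair reduces incremental decoding to driving the DFA and reacting to its terminal states. The following standalone sketch (using the DFA header directly, with hypothetical names; it is not the V8 API itself) shows the calling pattern: emit a code point on kAccept, emit U+FFFD on kReject and retry the offending byte if it arrived mid-sequence, and emit one final U+FFFD if the stream ends with an open sequence.

    #include <cstddef>
    #include <cstdint>
    #include <vector>
    #include "src/third_party/utf8-decoder/utf8-decoder.h"

    std::vector<uint32_t> DecodeStream(const uint8_t* bytes, size_t length) {
      using Utf8DfaDecoder::State;
      std::vector<uint32_t> out;
      State state = State::kAccept;
      uint32_t buffer = 0;
      for (size_t i = 0; i < length;) {
        State old_state = state;
        Utf8DfaDecoder::Decode(bytes[i++], &state, &buffer);
        if (state == State::kAccept) {
          out.push_back(buffer);   // one complete code point
          buffer = 0;
        } else if (state == State::kReject) {
          out.push_back(0xFFFD);   // invalid byte or sequence
          state = State::kAccept;
          buffer = 0;
          if (old_state != State::kAccept) i--;  // retry the byte that failed
        }
        // Otherwise the sequence is still incomplete; keep feeding bytes.
      }
      if (state != State::kAccept) out.push_back(0xFFFD);  // unterminated tail
      return out;
    }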
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index 04d58f3650..c6ce9a8eb2 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -7,6 +7,7 @@
#include <sys/types.h>
#include "src/globals.h"
+#include "src/third_party/utf8-decoder/utf8-decoder.h"
#include "src/utils.h"
/**
* \file
@@ -129,6 +130,8 @@ class Utf16 {
class V8_EXPORT_PRIVATE Utf8 {
public:
+ using State = Utf8DfaDecoder::State;
+
static inline uchar Length(uchar chr, int previous);
static inline unsigned EncodeOneByte(char* out, uint8_t c);
static inline unsigned Encode(char* out,
@@ -158,9 +161,9 @@ class V8_EXPORT_PRIVATE Utf8 {
static inline uchar ValueOf(const byte* str, size_t length, size_t* cursor);
typedef uint32_t Utf8IncrementalBuffer;
- static uchar ValueOfIncremental(byte next_byte,
+ static uchar ValueOfIncremental(byte next_byte, size_t* cursor, State* state,
Utf8IncrementalBuffer* buffer);
- static uchar ValueOfIncrementalFinish(Utf8IncrementalBuffer* buffer);
+ static uchar ValueOfIncrementalFinish(State* state);
// Excludes non-characters from the set of valid code points.
static inline bool IsValidCharacter(uchar c);
diff --git a/deps/v8/src/uri.cc b/deps/v8/src/uri.cc
index 3ebf58857b..775c0ede2c 100644
--- a/deps/v8/src/uri.cc
+++ b/deps/v8/src/uri.cc
@@ -38,8 +38,8 @@ bool IsReservedPredicate(uc16 c) {
bool IsReplacementCharacter(const uint8_t* octets, int length) {
// The replacement character is at codepoint U+FFFD in the Unicode Specials
// table. Its UTF-8 encoding is 0xEF 0xBF 0xBD.
- if (length != 3 || octets[0] != 0xef || octets[1] != 0xbf ||
- octets[2] != 0xbd) {
+ if (length != 3 || octets[0] != 0xEF || octets[1] != 0xBF ||
+ octets[2] != 0xBD) {
return false;
}
return true;
@@ -444,7 +444,7 @@ static MaybeHandle<String> EscapePrivate(Isolate* isolate,
}
// We don't allow strings that are longer than a maximal length.
- DCHECK_LT(String::kMaxLength, 0x7fffffff - 6); // Cannot overflow.
+ DCHECK_LT(String::kMaxLength, 0x7FFFFFFF - 6); // Cannot overflow.
if (escaped_length > String::kMaxLength) break; // Provoke exception.
}
}
@@ -468,10 +468,10 @@ static MaybeHandle<String> EscapePrivate(Isolate* isolate,
dest->SeqOneByteStringSet(dest_position + 1, 'u');
dest->SeqOneByteStringSet(dest_position + 2, HexCharOfValue(c >> 12));
dest->SeqOneByteStringSet(dest_position + 3,
- HexCharOfValue((c >> 8) & 0xf));
+ HexCharOfValue((c >> 8) & 0xF));
dest->SeqOneByteStringSet(dest_position + 4,
- HexCharOfValue((c >> 4) & 0xf));
- dest->SeqOneByteStringSet(dest_position + 5, HexCharOfValue(c & 0xf));
+ HexCharOfValue((c >> 4) & 0xF));
+ dest->SeqOneByteStringSet(dest_position + 5, HexCharOfValue(c & 0xF));
dest_position += 6;
} else if (IsNotEscaped(c)) {
dest->SeqOneByteStringSet(dest_position, c);
@@ -479,7 +479,7 @@ static MaybeHandle<String> EscapePrivate(Isolate* isolate,
} else {
dest->SeqOneByteStringSet(dest_position, '%');
dest->SeqOneByteStringSet(dest_position + 1, HexCharOfValue(c >> 4));
- dest->SeqOneByteStringSet(dest_position + 2, HexCharOfValue(c & 0xf));
+ dest->SeqOneByteStringSet(dest_position + 2, HexCharOfValue(c & 0xF));
dest_position += 3;
}
}
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index e6e98fabba..5b5d95ce9a 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -384,9 +384,9 @@ class BitField64 : public BitFieldBase<T, shift, size, uint64_t> { };
#define DEFINE_BIT_FIELD_RANGE_TYPE(Name, Type, Size, _) \
k##Name##Start, k##Name##End = k##Name##Start + Size - 1,
-#define DEFINE_BIT_RANGES(LIST_MACRO) \
- struct LIST_MACRO##_Ranges { \
- enum { LIST_MACRO(DEFINE_BIT_FIELD_RANGE_TYPE, _) }; \
+#define DEFINE_BIT_RANGES(LIST_MACRO) \
+ struct LIST_MACRO##_Ranges { \
+ enum { LIST_MACRO(DEFINE_BIT_FIELD_RANGE_TYPE, _) kBitsCount }; \
};
#define DEFINE_BIT_FIELD_TYPE(Name, Type, Size, RangesName) \
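Appending kBitsCount after the generated ranges gives the Ranges struct a trailing enumerator equal to the total number of bits the listed fields consume, which callers can use for static checks. A standalone illustration of the enum pattern (hypothetical field names, not the V8 macros):

    // Foo occupies 3 bits, Bar occupies 2; the trailing enumerator counts both.
    enum {
      kFooStart, kFooEnd = kFooStart + 3 - 1,
      kBarStart, kBarEnd = kBarStart + 2 - 1,
      kBitsCount  // == 5
    };
    static_assert(kBitsCount == 5, "the two ranges cover five bits");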
@@ -641,7 +641,7 @@ class Access {
template<typename T>
class SetOncePointer {
public:
- SetOncePointer() : pointer_(nullptr) {}
+ SetOncePointer() = default;
bool is_set() const { return pointer_ != nullptr; }
@@ -655,8 +655,16 @@ class SetOncePointer {
pointer_ = value;
}
+ T* operator=(T* value) {
+ set(value);
+ return value;
+ }
+
+ bool operator==(std::nullptr_t) const { return pointer_ == nullptr; }
+ bool operator!=(std::nullptr_t) const { return pointer_ != nullptr; }
+
private:
- T* pointer_;
+ T* pointer_ = nullptr;
};
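The new operator= and nullptr comparisons let call sites assign and test a SetOncePointer the same way they would a raw pointer. A minimal standalone re-creation of the idea (not the V8 class itself) shows the intended usage:

    #include <cassert>
    #include <cstddef>

    template <typename T>
    class SetOncePointer {
     public:
      bool is_set() const { return pointer_ != nullptr; }
      void set(T* value) {
        assert(pointer_ == nullptr && value != nullptr);  // set exactly once
        pointer_ = value;
      }
      T* operator=(T* value) { set(value); return value; }
      bool operator==(std::nullptr_t) const { return pointer_ == nullptr; }
      bool operator!=(std::nullptr_t) const { return pointer_ != nullptr; }
      T* get() const { assert(pointer_ != nullptr); return pointer_; }

     private:
      T* pointer_ = nullptr;
    };

    int main() {
      static int answer = 42;
      SetOncePointer<int> p;
      assert(p == nullptr);
      p = &answer;  // operator= instead of p.set(&answer)
      assert(p != nullptr && *p.get() == 42);
    }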
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 16107fdefc..a6d97e8ff1 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -19,6 +19,7 @@
#include "src/objects-inl.h"
#include "src/profiler/heap-profiler.h"
#include "src/runtime-profiler.h"
+#include "src/simulator.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/tracing-category-observer.h"
@@ -42,6 +43,9 @@ bool V8::Initialize() {
void V8::TearDown() {
+#if defined(USE_SIMULATOR)
+ Simulator::GlobalTearDown();
+#endif
Bootstrapper::TearDownExtensions();
ElementsAccessor::TearDown();
RegisteredExtension::UnregisterAll();
@@ -65,10 +69,15 @@ void V8::InitializeOncePerProcessImpl() {
FLAG_max_semi_space_size = 1;
}
- base::OS::Initialize(FLAG_random_seed, FLAG_hard_abort, FLAG_gc_fake_mmap);
+ base::OS::Initialize(FLAG_hard_abort, FLAG_gc_fake_mmap);
+
+ if (FLAG_random_seed) SetRandomMmapSeed(FLAG_random_seed);
Isolate::InitializeOncePerProcess();
+#if defined(USE_SIMULATOR)
+ Simulator::InitializeOncePerProcess();
+#endif
sampler::Sampler::SetUp();
CpuFeatures::Probe(false);
ElementsAccessor::InitializeOncePerProcess();
@@ -116,7 +125,7 @@ void V8::SetNativesBlob(StartupData* natives_blob) {
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
base::CallOnce(&init_natives_once, &SetNativesFromFile, natives_blob);
#else
- CHECK(false);
+ UNREACHABLE();
#endif
}
@@ -125,7 +134,7 @@ void V8::SetSnapshotBlob(StartupData* snapshot_blob) {
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
base::CallOnce(&init_snapshot_once, &SetSnapshotFromFile, snapshot_blob);
#else
- CHECK(false);
+ UNREACHABLE();
#endif
}
} // namespace internal
diff --git a/deps/v8/src/v8.gyp b/deps/v8/src/v8.gyp
index a7ce858022..218b173af2 100644
--- a/deps/v8/src/v8.gyp
+++ b/deps/v8/src/v8.gyp
@@ -1044,6 +1044,10 @@
'heap/spaces.h',
'heap/store-buffer.cc',
'heap/store-buffer.h',
+ 'heap/stress-marking-observer.cc',
+ 'heap/stress-marking-observer.h',
+ 'heap/stress-scavenge-observer.cc',
+ 'heap/stress-scavenge-observer.h',
'heap/sweeper.cc',
'heap/sweeper.h',
'heap/worklist.h',
@@ -1160,11 +1164,15 @@
'objects/code.h',
'objects/compilation-cache.h',
'objects/compilation-cache-inl.h',
+ 'objects/data-handler.h',
+ 'objects/data-handler-inl.h',
'objects/debug-objects-inl.h',
'objects/debug-objects.cc',
'objects/debug-objects.h',
'objects/descriptor-array.h',
'objects/dictionary.h',
+ 'objects/fixed-array.h',
+ 'objects/fixed-array-inl.h',
'objects/frame-array.h',
'objects/frame-array-inl.h',
'objects/hash-table-inl.h',
@@ -1173,6 +1181,8 @@
'objects/intl-objects.h',
'objects/js-array.h',
'objects/js-array-inl.h',
+ 'objects/js-collection.h',
+ 'objects/js-collection-inl.h',
'objects/js-regexp.h',
'objects/js-regexp-inl.h',
'objects/literal-objects.cc',
@@ -1336,6 +1346,8 @@
'safepoint-table.h',
'setup-isolate.h',
'signature.h',
+ 'simulator-base.cc',
+ 'simulator-base.h',
'simulator.h',
'snapshot/builtin-deserializer-allocator.cc',
'snapshot/builtin-deserializer-allocator.h',
@@ -1396,6 +1408,7 @@
'strtod.h',
'ic/stub-cache.cc',
'ic/stub-cache.h',
+ 'third_party/utf8-decoder/utf8-decoder.h',
'tracing/trace-event.cc',
'tracing/trace-event.h',
'tracing/traced-value.cc',
@@ -1430,6 +1443,8 @@
'v8threads.h',
'value-serializer.cc',
'value-serializer.h',
+ 'vector-slot-pair.cc',
+ 'vector-slot-pair.h',
'vector.h',
'version.cc',
'version.h',
@@ -1437,9 +1452,11 @@
'visitors.h',
'vm-state-inl.h',
'vm-state.h',
+ 'wasm/baseline/liftoff-assembler-defs.h',
'wasm/baseline/liftoff-assembler.cc',
'wasm/baseline/liftoff-assembler.h',
'wasm/baseline/liftoff-compiler.cc',
+ 'wasm/baseline/liftoff-register.h',
'wasm/compilation-manager.cc',
'wasm/compilation-manager.h',
'wasm/decoder.h',
@@ -1461,15 +1478,18 @@
'wasm/streaming-decoder.h',
'wasm/wasm-api.cc',
'wasm/wasm-api.h',
+ 'wasm/wasm-code-manager.cc',
+ 'wasm/wasm-code-manager.h',
'wasm/wasm-code-specialization.cc',
'wasm/wasm-code-specialization.h',
'wasm/wasm-code-wrapper.cc',
'wasm/wasm-code-wrapper.h',
+ 'wasm/wasm-constants.h',
'wasm/wasm-debug.cc',
+ 'wasm/wasm-engine.cc',
+ 'wasm/wasm-engine.h',
'wasm/wasm-external-refs.cc',
'wasm/wasm-external-refs.h',
- 'wasm/wasm-heap.cc',
- 'wasm/wasm-heap.h',
'wasm/wasm-js.cc',
'wasm/wasm-js.h',
'wasm/wasm-limits.h',
@@ -1895,6 +1915,8 @@
'base/once.cc',
'base/once.h',
'base/optional.h',
+ 'base/page-allocator.cc',
+ 'base/page-allocator.h',
'base/platform/elapsed-timer.h',
'base/platform/time.cc',
'base/platform/time.h',
@@ -2068,10 +2090,9 @@
'-L/usr/local/lib -lexecinfo',
]},
'sources': [
- 'base/debug/stack_trace_posix.cc',
'base/platform/platform-openbsd.cc',
'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
+ 'base/platform/platform-posix.cc',
'base/platform/platform-posix-time.h',
'base/platform/platform-posix-time.cc',
],
@@ -2337,12 +2358,10 @@
'js/macros.py',
'messages.h',
'js/prologue.js',
- 'js/v8natives.js',
'js/array.js',
'js/typedarray.js',
'js/messages.js',
'js/spread.js',
- 'js/proxy.js',
'debug/mirrors.js',
'debug/debug.js',
'debug/liveedit.js',
@@ -2466,6 +2485,8 @@
'objects-inl.h',
'objects/code.h',
'objects/code-inl.h',
+ 'objects/fixed-array.h',
+ 'objects/fixed-array-inl.h',
'objects/js-array.h',
'objects/js-array-inl.h',
'objects/js-regexp.h',
@@ -2568,5 +2589,41 @@
},
],
},
+ {
+ 'target_name': 'v8_monolith',
+ 'type': 'static_library',
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '../include',
+ ],
+ },
+ 'actions': [
+ {
+ 'action_name': 'build_with_gn',
+ 'inputs': [
+ '../tools/node/build_gn.py',
+ ],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/obj/libv8_monolith.a',
+ '<(INTERMEDIATE_DIR)/args.gn',
+ ],
+ 'action': [
+ '../tools/node/build_gn.py',
+ '<(CONFIGURATION_NAME)',
+ '../',
+ '<(INTERMEDIATE_DIR)',
+ 'v8_promise_internal_field_count=<(v8_promise_internal_field_count)',
+ 'target_cpu="<(target_arch)"',
+ 'target_os="<(OS)"',
+ 'v8_target_cpu="<(v8_target_arch)"',
+ 'v8_embedder_string="<(v8_embedder_string)"',
+ 'v8_use_snapshot=<(v8_use_snapshot)',
+ 'v8_optimized_debug=<(v8_optimized_debug)',
+ 'v8_enable_disassembler=<(v8_enable_disassembler)',
+ 'v8_postmortem_support=<(v8_postmortem_support)',
+ ],
+ },
+ ],
+ },
],
}
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
index 974ee2c76d..5e2ab19877 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/value-serializer.cc
@@ -214,11 +214,11 @@ void ValueSerializer::WriteVarint(T value) {
uint8_t stack_buffer[sizeof(T) * 8 / 7 + 1];
uint8_t* next_byte = &stack_buffer[0];
do {
- *next_byte = (value & 0x7f) | 0x80;
+ *next_byte = (value & 0x7F) | 0x80;
next_byte++;
value >>= 7;
} while (value);
- *(next_byte - 1) &= 0x7f;
+ *(next_byte - 1) &= 0x7F;
WriteRawBytes(stack_buffer, next_byte - stack_buffer);
}
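WriteVarint emits the value in little-endian base-128 groups, setting the high bit on every byte except the last; ReadVarint reverses this by accumulating 7 bits per byte until it sees a byte without the continuation bit. A small standalone sketch of the same scheme (illustrative names, not the V8 API):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    std::vector<uint8_t> EncodeVarint(uint32_t value) {
      std::vector<uint8_t> out;
      do {
        out.push_back(static_cast<uint8_t>((value & 0x7F) | 0x80));
        value >>= 7;
      } while (value);
      out.back() &= 0x7F;  // clear the continuation bit on the final byte
      return out;
    }

    int main() {
      for (uint8_t b : EncodeVarint(300)) std::printf("%02X ", b);  // AC 02
      std::printf("\n");
    }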
@@ -848,7 +848,7 @@ Maybe<bool> ValueSerializer::WriteWasmModule(Handle<WasmModuleObject> object) {
WriteTag(SerializationTag::kWasmModule);
WriteRawBytes(&encoding_tag, sizeof(encoding_tag));
- Handle<String> wire_bytes(compiled_part->module_bytes(), isolate_);
+ Handle<String> wire_bytes(compiled_part->shared()->module_bytes(), isolate_);
int wire_bytes_length = wire_bytes->length();
WriteVarint<uint32_t>(wire_bytes_length);
uint8_t* destination;
@@ -856,20 +856,10 @@ Maybe<bool> ValueSerializer::WriteWasmModule(Handle<WasmModuleObject> object) {
String::WriteToFlat(*wire_bytes, destination, 0, wire_bytes_length);
}
- if (FLAG_wasm_jit_to_native) {
- std::pair<std::unique_ptr<byte[]>, size_t> serialized_module =
- wasm::NativeModuleSerializer::SerializeWholeModule(isolate_,
- compiled_part);
- WriteVarint<uint32_t>(static_cast<uint32_t>(serialized_module.second));
- WriteRawBytes(serialized_module.first.get(), serialized_module.second);
- } else {
- std::unique_ptr<ScriptData> script_data =
- WasmCompiledModuleSerializer::SerializeWasmModule(isolate_,
- compiled_part);
- int script_data_length = script_data->length();
- WriteVarint<uint32_t>(script_data_length);
- WriteRawBytes(script_data->data(), script_data_length);
- }
+ std::pair<std::unique_ptr<const byte[]>, size_t> serialized_module =
+ wasm::SerializeNativeModule(isolate_, compiled_part);
+ WriteVarint<uint32_t>(static_cast<uint32_t>(serialized_module.second));
+ WriteRawBytes(serialized_module.first.get(), serialized_module.second);
return ThrowIfOutOfMemory();
}
@@ -1032,7 +1022,7 @@ Maybe<T> ValueDeserializer::ReadVarint() {
if (position_ >= end_) return Nothing<T>();
uint8_t byte = *position_;
if (V8_LIKELY(shift < sizeof(T) * 8)) {
- value |= static_cast<T>(byte & 0x7f) << shift;
+ value |= static_cast<T>(byte & 0x7F) << shift;
shift += 7;
}
has_another_byte = byte & 0x80;
@@ -1716,23 +1706,11 @@ MaybeHandle<JSObject> ValueDeserializer::ReadWasmModule() {
}
// Try to deserialize the compiled module first.
- Handle<FixedArray> compiled_part;
+ Handle<WasmCompiledModule> compiled_module;
MaybeHandle<JSObject> result;
- if (FLAG_wasm_jit_to_native) {
- if (wasm::NativeModuleDeserializer::DeserializeFullBuffer(
- isolate_, compiled_bytes, wire_bytes)
- .ToHandle(&compiled_part)) {
- result = WasmModuleObject::New(
- isolate_, Handle<WasmCompiledModule>::cast(compiled_part));
- }
- } else {
- ScriptData script_data(compiled_bytes.start(), compiled_bytes.length());
- if (WasmCompiledModuleSerializer::DeserializeWasmModule(
- isolate_, &script_data, wire_bytes)
- .ToHandle(&compiled_part)) {
- result = WasmModuleObject::New(
- isolate_, Handle<WasmCompiledModule>::cast(compiled_part));
- }
+ if (wasm::DeserializeNativeModule(isolate_, compiled_bytes, wire_bytes)
+ .ToHandle(&compiled_module)) {
+ result = WasmModuleObject::New(isolate_, compiled_module);
}
if (result.is_null()) {
wasm::ErrorThrower thrower(isolate_, "ValueDeserializer::ReadWasmModule");
diff --git a/deps/v8/src/vector-slot-pair.cc b/deps/v8/src/vector-slot-pair.cc
new file mode 100644
index 0000000000..e639a9037e
--- /dev/null
+++ b/deps/v8/src/vector-slot-pair.cc
@@ -0,0 +1,39 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/vector-slot-pair.h"
+
+#include "src/feedback-vector.h"
+
+namespace v8 {
+namespace internal {
+
+VectorSlotPair::VectorSlotPair() {}
+
+int VectorSlotPair::index() const {
+ return vector_.is_null() ? -1 : FeedbackVector::GetIndex(slot_);
+}
+
+bool operator==(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
+ return lhs.slot() == rhs.slot() &&
+ lhs.vector().location() == rhs.vector().location();
+}
+
+bool operator!=(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
+ return !(lhs == rhs);
+}
+
+std::ostream& operator<<(std::ostream& os, const VectorSlotPair& pair) {
+ if (pair.IsValid()) {
+ return os << "VectorSlotPair(" << pair.slot() << ")";
+ }
+ return os << "VectorSlotPair(INVALID)";
+}
+
+size_t hash_value(VectorSlotPair const& p) {
+ return base::hash_combine(p.slot(), p.vector().location());
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/vector-slot-pair.h b/deps/v8/src/vector-slot-pair.h
new file mode 100644
index 0000000000..cd9434c630
--- /dev/null
+++ b/deps/v8/src/vector-slot-pair.h
@@ -0,0 +1,47 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_VECTOR_SLOT_PAIR_H_
+#define V8_VECTOR_SLOT_PAIR_H_
+
+#include "src/globals.h"
+#include "src/handles.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class FeedbackVector;
+
+// Defines a pair of {FeedbackVector} and {FeedbackSlot}, which
+// is used to access the type feedback for a certain {Node}.
+class V8_EXPORT_PRIVATE VectorSlotPair {
+ public:
+ VectorSlotPair();
+ VectorSlotPair(Handle<FeedbackVector> vector, FeedbackSlot slot)
+ : vector_(vector), slot_(slot) {}
+
+ bool IsValid() const { return !vector_.is_null() && !slot_.IsInvalid(); }
+
+ Handle<FeedbackVector> vector() const { return vector_; }
+ FeedbackSlot slot() const { return slot_; }
+
+ int index() const;
+
+ private:
+ Handle<FeedbackVector> vector_;
+ FeedbackSlot slot_;
+};
+
+bool operator==(VectorSlotPair const&, VectorSlotPair const&);
+bool operator!=(VectorSlotPair const&, VectorSlotPair const&);
+
+std::ostream& operator<<(std::ostream& os, const VectorSlotPair& pair);
+
+size_t hash_value(VectorSlotPair const&);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_VECTOR_SLOT_PAIR_H_
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm-defs.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm-defs.h
deleted file mode 100644
index d115b3f83d..0000000000
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm-defs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_DEFS_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_DEFS_H_
-
-#include "src/reglist.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// TODO(clemensh): Implement the LiftoffAssembler on this platform.
-static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
-
-static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index d632e39aff..7f7993d34f 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -11,52 +11,168 @@ namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
+void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
-void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
+ UNIMPLEMENTED();
+}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {}
-
-void LiftoffAssembler::SpillContext(Register context) {}
-
-void LiftoffAssembler::Load(Register dst, Register src_addr,
- uint32_t offset_imm, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
- Register src, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
- uint32_t caller_slot_idx) {}
-
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
-
-void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
-
-void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
-
-#define DEFAULT_I32_BINOP(name, internal_name) \
- void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
- Register rhs) {}
-
-// clang-format off
-DEFAULT_I32_BINOP(add, add)
-DEFAULT_I32_BINOP(sub, sub)
-DEFAULT_I32_BINOP(mul, imul)
-DEFAULT_I32_BINOP(and, and)
-DEFAULT_I32_BINOP(or, or)
-DEFAULT_I32_BINOP(xor, xor)
-// clang-format on
-
-#undef DEFAULT_I32_BINOP
-
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+ int size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+ UNIMPLEMENTED();
+}
+
+#define UNIMPLEMENTED_GP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_GP_UNOP(name) \
+ bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_FP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ UNIMPLEMENTED(); \
+ }
+
+UNIMPLEMENTED_GP_BINOP(i32_add)
+UNIMPLEMENTED_GP_BINOP(i32_sub)
+UNIMPLEMENTED_GP_BINOP(i32_mul)
+UNIMPLEMENTED_GP_BINOP(i32_and)
+UNIMPLEMENTED_GP_BINOP(i32_or)
+UNIMPLEMENTED_GP_BINOP(i32_xor)
+UNIMPLEMENTED_GP_BINOP(i32_shl)
+UNIMPLEMENTED_GP_BINOP(i32_sar)
+UNIMPLEMENTED_GP_BINOP(i32_shr)
+UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_GP_UNOP(i32_clz)
+UNIMPLEMENTED_GP_UNOP(i32_ctz)
+UNIMPLEMENTED_GP_UNOP(i32_popcnt)
+UNIMPLEMENTED_GP_BINOP(ptrsize_add)
+UNIMPLEMENTED_FP_BINOP(f32_add)
+UNIMPLEMENTED_FP_BINOP(f32_sub)
+UNIMPLEMENTED_FP_BINOP(f32_mul)
+
+#undef UNIMPLEMENTED_GP_BINOP
+#undef UNIMPLEMENTED_GP_UNOP
+#undef UNIMPLEMENTED_FP_BINOP
+
+void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
+ uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
+ uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h
deleted file mode 100644
index 18f49fae68..0000000000
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_DEFS_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_DEFS_H_
-
-#include "src/reglist.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// TODO(clemensh): Implement the LiftoffAssembler on this platform.
-static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
-
-static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 2578301ad5..8d28c2b21c 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -11,52 +11,168 @@ namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
+void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
-void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
+ UNIMPLEMENTED();
+}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {}
-
-void LiftoffAssembler::SpillContext(Register context) {}
-
-void LiftoffAssembler::Load(Register dst, Register src_addr,
- uint32_t offset_imm, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
- Register src, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
- uint32_t caller_slot_idx) {}
-
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
-
-void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
-
-void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
-
-#define DEFAULT_I32_BINOP(name, internal_name) \
- void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
- Register rhs) {}
-
-// clang-format off
-DEFAULT_I32_BINOP(add, add)
-DEFAULT_I32_BINOP(sub, sub)
-DEFAULT_I32_BINOP(mul, imul)
-DEFAULT_I32_BINOP(and, and)
-DEFAULT_I32_BINOP(or, or)
-DEFAULT_I32_BINOP(xor, xor)
-// clang-format on
-
-#undef DEFAULT_I32_BINOP
-
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+ int size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+ UNIMPLEMENTED();
+}
+
+#define UNIMPLEMENTED_GP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_GP_UNOP(name) \
+ bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_FP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ UNIMPLEMENTED(); \
+ }
+
+UNIMPLEMENTED_GP_BINOP(i32_add)
+UNIMPLEMENTED_GP_BINOP(i32_sub)
+UNIMPLEMENTED_GP_BINOP(i32_mul)
+UNIMPLEMENTED_GP_BINOP(i32_and)
+UNIMPLEMENTED_GP_BINOP(i32_or)
+UNIMPLEMENTED_GP_BINOP(i32_xor)
+UNIMPLEMENTED_GP_BINOP(i32_shl)
+UNIMPLEMENTED_GP_BINOP(i32_sar)
+UNIMPLEMENTED_GP_BINOP(i32_shr)
+UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_GP_UNOP(i32_clz)
+UNIMPLEMENTED_GP_UNOP(i32_ctz)
+UNIMPLEMENTED_GP_UNOP(i32_popcnt)
+UNIMPLEMENTED_GP_BINOP(ptrsize_add)
+UNIMPLEMENTED_FP_BINOP(f32_add)
+UNIMPLEMENTED_FP_BINOP(f32_sub)
+UNIMPLEMENTED_FP_BINOP(f32_mul)
+
+#undef UNIMPLEMENTED_GP_BINOP
+#undef UNIMPLEMENTED_GP_UNOP
+#undef UNIMPLEMENTED_FP_BINOP
+
+void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
+ uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
+ uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h
deleted file mode 100644
index 6fd95caf41..0000000000
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_DEFS_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_DEFS_H_
-
-#include "src/reglist.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
-
-static constexpr RegList kLiftoffAssemblerGpCacheRegs =
- Register::ListOf<eax, ecx, edx, ebx, esi, edi>();
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 696e2544c0..a8b5b32bdc 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -19,32 +19,50 @@ namespace liftoff {
inline Operand GetStackSlot(uint32_t index) {
// ebp-8 holds the stack marker, ebp-16 is the wasm context, first stack slot
// is located at ebp-24.
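// (E.g., with LiftoffAssembler::kStackSlotSize == 8, slot index 2 is at
// ebp - 24 - 2 * 8 = ebp - 40.)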
- constexpr int32_t kStackSlotSize = 8;
constexpr int32_t kFirstStackSlotOffset = -24;
- return Operand(ebp, kFirstStackSlotOffset - index * kStackSlotSize);
+ return Operand(
+ ebp, kFirstStackSlotOffset - index * LiftoffAssembler::kStackSlotSize);
}
// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
inline Operand GetContextOperand() { return Operand(ebp, -16); }
+static constexpr LiftoffRegList kByteRegs =
+ LiftoffRegList::FromBits<Register::ListOf<eax, ecx, edx, ebx>()>();
+static_assert(kByteRegs.GetNumRegsSet() == 4, "should have four byte regs");
+static_assert((kByteRegs & kGpCacheRegList) == kByteRegs,
+ "kByteRegs only contains gp cache registers");
+
+// Use this register to store the address of the last argument pushed on the
+// stack for a call to C.
+static constexpr Register kCCallLastArgAddrReg = eax;
+
} // namespace liftoff
-void LiftoffAssembler::ReserveStackSpace(uint32_t space) {
- stack_space_ = space;
- sub(esp, Immediate(space));
+static constexpr DoubleRegister kScratchDoubleReg = xmm7;
+
+void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) {
+ DCHECK_LE(bytes, kMaxInt);
+ sub(esp, Immediate(bytes));
}
-void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
switch (value.type()) {
case kWasmI32:
if (value.to_i32() == 0) {
- xor_(reg, reg);
+ xor_(reg.gp(), reg.gp());
} else {
- mov(reg, Immediate(value.to_i32()));
+ mov(reg.gp(), Immediate(value.to_i32()));
}
break;
+ case kWasmF32: {
+ Register tmp = GetUnusedRegister(kGpReg).gp();
+ mov(tmp, Immediate(value.to_f32_boxed().get_bits()));
+ movd(reg.fp(), tmp);
+ break;
+ }
default:
- UNIMPLEMENTED();
+ UNREACHABLE();
}
}
@@ -60,46 +78,109 @@ void LiftoffAssembler::SpillContext(Register context) {
mov(liftoff::GetContextOperand(), context);
}
-void LiftoffAssembler::Load(Register dst, Register src_addr,
- uint32_t offset_imm, int size,
- PinnedRegisterScope pinned) {
- Operand src_op = Operand(src_addr, offset_imm);
+void LiftoffAssembler::FillContextInto(Register dst) {
+ mov(dst, liftoff::GetContextOperand());
+}
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc) {
+ Operand src_op = offset_reg == no_reg
+ ? Operand(src_addr, offset_imm)
+ : Operand(src_addr, offset_reg, times_1, offset_imm);
if (offset_imm > kMaxInt) {
// The immediate can not be encoded in the operand. Load it to a register
// first.
- Register src = GetUnusedRegister(kGpReg, pinned);
+ Register src = GetUnusedRegister(kGpReg, pinned).gp();
mov(src, Immediate(offset_imm));
+ if (offset_reg != no_reg) {
+ emit_ptrsize_add(src, src, offset_reg);
+ }
src_op = Operand(src_addr, src, times_1, 0);
}
- DCHECK_EQ(4, size);
- mov(dst, src_op);
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ movzx_b(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load8S:
+ movsx_b(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16U:
+ movzx_w(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16S:
+ movsx_w(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load:
+ mov(dst.gp(), src_op);
+ break;
+ case LoadType::kF32Load:
+ movss(dst.fp(), src_op);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
- Register src, int size,
- PinnedRegisterScope pinned) {
- Operand dst_op = Operand(dst_addr, offset_imm);
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc) {
+ Operand dst_op = offset_reg == no_reg
+ ? Operand(dst_addr, offset_imm)
+ : Operand(dst_addr, offset_reg, times_1, offset_imm);
if (offset_imm > kMaxInt) {
// The immediate can not be encoded in the operand. Load it to a register
// first.
- Register dst = GetUnusedRegister(kGpReg, pinned);
+ Register dst = pinned.set(GetUnusedRegister(kGpReg, pinned).gp());
mov(dst, Immediate(offset_imm));
+ if (offset_reg != no_reg) {
+ emit_ptrsize_add(dst, dst, offset_reg);
+ }
dst_op = Operand(dst_addr, dst, times_1, 0);
}
- DCHECK_EQ(4, size);
- mov(dst_op, src);
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ // Only the lower 4 registers can be addressed as 8-bit registers.
+ if (src.gp().is_byte_register()) {
+ mov_b(dst_op, src.gp());
+ } else {
+ Register byte_src = GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
+ mov(byte_src, src.gp());
+ mov_b(dst_op, byte_src);
+ }
+ break;
+ case StoreType::kI32Store16:
+ mov_w(dst_op, src.gp());
+ break;
+ case StoreType::kI32Store:
+ mov(dst_op, src.gp());
+ break;
+ case StoreType::kF32Store:
+ movss(dst_op, src.fp());
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {
- constexpr int32_t kCallerStackSlotSize = 4;
- mov(dst, Operand(ebp, kCallerStackSlotSize * (caller_slot_idx + 1)));
+ Operand src(ebp, kPointerSize * (caller_slot_idx + 1));
+ if (dst.is_gp()) {
+ mov(dst.gp(), src);
+ } else {
+ movss(dst.fp(), src);
+ }
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
DCHECK_NE(dst_index, src_index);
- if (cache_state_.has_unused_register()) {
- Register reg = GetUnusedRegister(kGpReg);
+ if (cache_state_.has_unused_register(kGpReg)) {
+ LiftoffRegister reg = GetUnusedRegister(kGpReg);
Fill(reg, src_index);
Spill(dst_index, reg);
} else {
@@ -108,23 +189,60 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
}
}
-void LiftoffAssembler::MoveToReturnRegister(Register reg) {
- if (reg != eax) mov(eax, reg);
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+ // TODO(wasm): Extract the destination register from the CallDescriptor.
+ // TODO(wasm): Add multi-return support.
+ LiftoffRegister dst =
+ reg.is_gp() ? LiftoffRegister(eax) : LiftoffRegister(xmm1);
+ if (reg != dst) Move(dst, reg);
+}
+
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+ // The caller should check that the registers are not equal. For most
+ // occurences, this is already guaranteed, so no need to check within this
+  // occurrences, this is already guaranteed, so no need to check within this
+ DCHECK_NE(dst, src);
+ DCHECK_EQ(dst.reg_class(), src.reg_class());
+ // TODO(clemensh): Handle different sizes here.
+ if (dst.is_gp()) {
+ mov(dst.gp(), src.gp());
+ } else {
+ movsd(dst.fp(), src.fp());
+ }
}
-void LiftoffAssembler::Spill(uint32_t index, Register reg) {
- // TODO(clemensh): Handle different types here.
- mov(liftoff::GetStackSlot(index), reg);
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
+ Operand dst = liftoff::GetStackSlot(index);
+ // TODO(clemensh): Handle different sizes here.
+ if (reg.is_gp()) {
+ mov(dst, reg.gp());
+ } else {
+ movsd(dst, reg.fp());
+ }
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- // TODO(clemensh): Handle different types here.
- mov(liftoff::GetStackSlot(index), Immediate(value.to_i32()));
+ Operand dst = liftoff::GetStackSlot(index);
+ switch (value.type()) {
+ case kWasmI32:
+ mov(dst, Immediate(value.to_i32()));
+ break;
+ case kWasmF32:
+ mov(dst, Immediate(value.to_f32_boxed().get_bits()));
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::Fill(Register reg, uint32_t index) {
- // TODO(clemensh): Handle different types here.
- mov(reg, liftoff::GetStackSlot(index));
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+ Operand src = liftoff::GetStackSlot(index);
+ // TODO(clemensh): Handle different sizes here.
+ if (reg.is_gp()) {
+ mov(reg.gp(), src);
+ } else {
+ movsd(reg.fp(), src);
+ }
}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
@@ -163,11 +281,303 @@ COMMUTATIVE_I32_BINOP(or, or_)
COMMUTATIVE_I32_BINOP(xor, xor_)
// clang-format on
-#undef DEFAULT_I32_BINOP
+#undef COMMUTATIVE_I32_BINOP
+
+namespace liftoff {
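+// Helper for the variable shift operations below. On ia32 the shift count for
+// shl/sar/shr has to live in CL, so {rhs} must end up in ecx before the shift
+// is emitted; the moves below avoid clobbering live values in ecx.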
+inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
+ Register lhs, Register rhs,
+ void (Assembler::*emit_shift)(Register)) {
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(dst, lhs, rhs);
+ // If dst is ecx, compute into a tmp register first, then move to ecx.
+ if (dst == ecx) {
+ Register tmp = assm->GetUnusedRegister(kGpReg, pinned).gp();
+ assm->mov(tmp, lhs);
+ if (rhs != ecx) assm->mov(ecx, rhs);
+ (assm->*emit_shift)(tmp);
+ assm->mov(ecx, tmp);
+ return;
+ }
+
+ // Move rhs into ecx. If ecx is in use, move its content to a tmp register
+ // first. If lhs is ecx, lhs is now the tmp register.
+ Register tmp_reg = no_reg;
+ if (rhs != ecx) {
+ if (lhs == ecx || assm->cache_state()->is_used(LiftoffRegister(ecx))) {
+ tmp_reg = assm->GetUnusedRegister(kGpReg, pinned).gp();
+ assm->mov(tmp_reg, ecx);
+ if (lhs == ecx) lhs = tmp_reg;
+ }
+ assm->mov(ecx, rhs);
+ }
+
+ // Do the actual shift.
+ if (dst != lhs) assm->mov(dst, lhs);
+ (assm->*emit_shift)(dst);
+
+ // Restore ecx if needed.
+ if (tmp_reg.is_valid()) assm->mov(ecx, tmp_reg);
+}
+} // namespace liftoff
+
+void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shl_cl);
+}
+
+void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sar_cl);
+}
+
+void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shr_cl);
+}
+
+bool LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
+ Register tmp_byte_reg = dst;
+ // Only the lower 4 registers can be addressed as 8-bit registers.
+ if (!dst.is_byte_register()) {
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(src);
+ tmp_byte_reg = GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
+ }
+
+ test(src, src);
+ setcc(zero, tmp_byte_reg);
+ movzx_b(dst, tmp_byte_reg);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
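+  // bsr leaves the destination undefined for a zero input, so handle the
+  // zero case explicitly and return 32.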
+ Label nonzero_input;
+ Label continuation;
+ test(src, src);
+ j(not_zero, &nonzero_input, Label::kNear);
+ mov(dst, Immediate(32));
+ jmp(&continuation, Label::kNear);
+
+ bind(&nonzero_input);
+ // Get most significant bit set (MSBS).
+ bsr(dst, src);
+ // CLZ = 31 - MSBS = MSBS ^ 31.
+ xor_(dst, 31);
+
+ bind(&continuation);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
+ Label nonzero_input;
+ Label continuation;
+ test(src, src);
+ j(not_zero, &nonzero_input, Label::kNear);
+ mov(dst, Immediate(32));
+ jmp(&continuation, Label::kNear);
+
+ bind(&nonzero_input);
+ // Get least significant bit set, which equals number of trailing zeros.
+ bsf(dst, src);
+
+ bind(&continuation);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
+ if (!CpuFeatures::IsSupported(POPCNT)) return false;
+ CpuFeatureScope scope(this, POPCNT);
+ popcnt(dst, src);
+ return true;
+}
+
+void LiftoffAssembler::emit_ptrsize_add(Register dst, Register lhs,
+ Register rhs) {
+ emit_i32_add(dst, lhs, rhs);
+}
+
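+// The f32 binops below share one pattern: use the three-operand AVX form when
+// available, otherwise fall back to the two-operand SSE form, taking care of
+// the case where {dst} aliases {rhs}.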
+void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vaddss(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ addss(dst, lhs);
+ } else {
+ if (dst != lhs) movss(dst, lhs);
+ addss(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vsubss(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ movss(kScratchDoubleReg, rhs);
+ movss(dst, lhs);
+ subss(dst, kScratchDoubleReg);
+ } else {
+ if (dst != lhs) movss(dst, lhs);
+ subss(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmulss(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ mulss(dst, lhs);
+ } else {
+ if (dst != lhs) movss(dst, lhs);
+ mulss(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_i32_test(Register reg) { test(reg, reg); }
+
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
+ cmp(lhs, rhs);
+}
+
+void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
+
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+ j(cond, label);
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code) {
+ Register limit = GetUnusedRegister(kGpReg).gp();
+ mov(limit, Immediate(ExternalReference::address_of_stack_limit(isolate())));
+ cmp(esp, Operand(limit, 0));
+ j(below_equal, ool_code);
+}
+
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()), 0);
+}
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ TurboAssembler::AssertUnreachable(reason);
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
+ uint32_t src_index) {
+ switch (src.loc()) {
+ case VarState::kStack:
+ DCHECK_NE(kWasmF64, src.type()); // TODO(clemensh): Implement this.
+ push(liftoff::GetStackSlot(src_index));
+ break;
+ case VarState::kRegister:
+ PushCallerFrameSlot(src.reg());
+ break;
+ case VarState::kI32Const:
+ push(Immediate(src.i32_const()));
+ break;
+ }
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
+ if (reg.is_gp()) {
+ push(reg.gp());
+ } else {
+ sub(esp, Immediate(kPointerSize));
+ movss(Operand(esp, 0), reg.fp());
+ }
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetFirstRegSet();
+ push(reg.gp());
+ gp_regs.clear(reg);
+ }
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned num_fp_regs = fp_regs.GetNumRegsSet();
+ if (num_fp_regs) {
+ sub(esp, Immediate(num_fp_regs * kStackSlotSize));
+ unsigned offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ movsd(Operand(esp, offset), reg.fp());
+ fp_regs.clear(reg);
+ offset += sizeof(double);
+ }
+ DCHECK_EQ(offset, num_fp_regs * sizeof(double));
+ }
+}
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned fp_offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ movsd(reg.fp(), Operand(esp, fp_offset));
+ fp_regs.clear(reg);
+ fp_offset += sizeof(double);
+ }
+ if (fp_offset) add(esp, Immediate(fp_offset));
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
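+  // PushRegisters pushed the gp registers in ascending order, so pop them in
+  // reverse (descending) order here.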
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetLastRegSet();
+ pop(reg.gp());
+ gp_regs.clear(reg);
+ }
+}
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
+ ret(static_cast<int>(num_stack_slots * kPointerSize));
+}
+
+void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+ for (size_t param = 0; param < num_params; ++param) {
+ push(args[param]);
+ }
+ mov(liftoff::kCCallLastArgAddrReg, esp);
+ constexpr Register kScratch = ebx;
+ static_assert(kScratch != liftoff::kCCallLastArgAddrReg, "collision");
+ PrepareCallCFunction(num_params, kScratch);
+}
+
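+// Computes the address of C-call parameter {param_idx} into {dst}. Arguments
+// were pushed first-to-last in PrepareCCall, so parameter {param_idx} sits
+// {num_params - 1 - param_idx} pointer-sized slots above the last pushed
+// argument, whose address is kept in kCCallLastArgAddrReg.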
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
+ uint32_t num_params) {
+ int offset = kPointerSize * static_cast<int>(num_params - 1 - param_idx);
+ lea(dst, Operand(liftoff::kCCallLastArgAddrReg, offset));
+}
+
+void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
+ uint32_t param_idx,
+ uint32_t num_params) {
+ constexpr Register kScratch = ebx;
+ static_assert(kScratch != liftoff::kCCallLastArgAddrReg, "collision");
+ int offset = kPointerSize * static_cast<int>(num_params - 1 - param_idx);
+ lea(kScratch, Operand(liftoff::kCCallLastArgAddrReg, offset));
+ mov(Operand(esp, param_idx * kPointerSize), kScratch);
+}
+
+void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+ CallCFunction(ext_ref, static_cast<int>(num_params));
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ wasm_call(addr, RelocInfo::WASM_CALL);
+}
+
+void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
+ // Set context to zero.
+ xor_(esi, esi);
+ CallRuntimeDelayed(zone, fid);
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ sub(esp, Immediate(size));
+ mov(addr, esp);
+}
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {
- test(reg, reg);
- j(zero, label);
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ add(esp, Immediate(size));
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
new file mode 100644
index 0000000000..3eef1e1960
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -0,0 +1,64 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
+
+#include "src/reglist.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/ia32/assembler-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/x64/assembler-x64.h"
+#endif
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#if V8_TARGET_ARCH_IA32
+
+constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
+
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf<eax, ecx, edx, ebx, esi, edi>();
+
+// Omit xmm7, which is the kScratchDoubleReg.
+constexpr RegList kLiftoffAssemblerFpCacheRegs =
+ DoubleRegister::ListOf<xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6>();
+
+#elif V8_TARGET_ARCH_X64
+
+constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
+
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf<rax, rcx, rdx, rbx, rsi, rdi>();
+
+constexpr RegList kLiftoffAssemblerFpCacheRegs =
+ DoubleRegister::ListOf<xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7>();
+
+#else
+
+constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
+
+constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
+
+constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
+
+#endif
+
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+constexpr Condition kEqual = equal;
+constexpr Condition kUnsignedGreaterEqual = above_equal;
+#else
+// On unimplemented platforms, just make this compile.
+constexpr Condition kEqual = static_cast<Condition>(0);
+constexpr Condition kUnsignedGreaterEqual = static_cast<Condition>(0);
+#endif
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 8a68fe4d91..121cfeea6a 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -29,20 +29,21 @@ namespace {
class StackTransferRecipe {
struct RegisterMove {
- Register dst;
- Register src;
- constexpr RegisterMove(Register dst, Register src) : dst(dst), src(src) {}
+ LiftoffRegister dst;
+ LiftoffRegister src;
+ constexpr RegisterMove(LiftoffRegister dst, LiftoffRegister src)
+ : dst(dst), src(src) {}
};
struct RegisterLoad {
- Register dst;
+ LiftoffRegister dst;
bool is_constant_load; // otherwise load it from the stack.
union {
uint32_t stack_slot;
WasmValue constant;
};
- RegisterLoad(Register dst, WasmValue constant)
+ RegisterLoad(LiftoffRegister dst, WasmValue constant)
: dst(dst), is_constant_load(true), constant(constant) {}
- RegisterLoad(Register dst, uint32_t stack_slot)
+ RegisterLoad(LiftoffRegister dst, uint32_t stack_slot)
: dst(dst), is_constant_load(false), stack_slot(stack_slot) {}
};
@@ -54,18 +55,16 @@ class StackTransferRecipe {
// First, execute register moves. Then load constants and stack values into
// registers.
- if ((move_dst_regs & move_src_regs) == 0) {
+ if ((move_dst_regs & move_src_regs).is_empty()) {
// No overlap in src and dst registers. Just execute the moves in any
// order.
for (RegisterMove& rm : register_moves) asm_->Move(rm.dst, rm.src);
register_moves.clear();
} else {
// Keep use counters of src registers.
- constexpr size_t kRegArrSize =
- LiftoffAssembler::CacheState::kMaxRegisterCode + 1;
- uint32_t src_reg_use_count[kRegArrSize] = {0};
+ uint32_t src_reg_use_count[kAfterMaxLiftoffRegCode] = {0};
for (RegisterMove& rm : register_moves) {
- ++src_reg_use_count[rm.src.code()];
+ ++src_reg_use_count[rm.src.liftoff_code()];
}
// Now repeatedly iterate the list of register moves, and execute those
// whose dst register does not appear as src any more. The remaining moves
@@ -77,11 +76,11 @@ class StackTransferRecipe {
while (!register_moves.empty()) {
int executed_moves = 0;
for (auto& rm : register_moves) {
- if (src_reg_use_count[rm.dst.code()] == 0) {
+ if (src_reg_use_count[rm.dst.liftoff_code()] == 0) {
asm_->Move(rm.dst, rm.src);
++executed_moves;
- DCHECK_LT(0, src_reg_use_count[rm.src.code()]);
- --src_reg_use_count[rm.src.code()];
+ DCHECK_LT(0, src_reg_use_count[rm.src.liftoff_code()]);
+ --src_reg_use_count[rm.src.liftoff_code()];
} else if (executed_moves) {
// Compaction: Move not-executed moves to the beginning of the list.
(&rm)[-executed_moves] = rm;
@@ -89,17 +88,18 @@ class StackTransferRecipe {
}
if (executed_moves == 0) {
// There is a cycle. Spill one register, then continue.
- Register spill_reg = register_moves.back().src;
+ // TODO(clemensh): Use an unused register if available.
+ LiftoffRegister spill_reg = register_moves.back().src;
asm_->Spill(next_spill_slot, spill_reg);
// Remember to reload into the destination register later.
LoadStackSlot(register_moves.back().dst, next_spill_slot);
- DCHECK_EQ(1, src_reg_use_count[spill_reg.code()]);
- src_reg_use_count[spill_reg.code()] = 0;
+ DCHECK_EQ(1, src_reg_use_count[spill_reg.liftoff_code()]);
+ src_reg_use_count[spill_reg.liftoff_code()] = 0;
++next_spill_slot;
executed_moves = 1;
}
- constexpr RegisterMove dummy(no_reg, no_reg);
- register_moves.resize(register_moves.size() - executed_moves, dummy);
+ register_moves.erase(register_moves.end() - executed_moves,
+ register_moves.end());
}
}
@@ -127,43 +127,50 @@ class StackTransferRecipe {
case VarState::kRegister:
asm_->Spill(dst_index, src.reg());
break;
- case VarState::kConstant:
- // TODO(clemensh): Handle other types than i32.
+ case VarState::kI32Const:
asm_->Spill(dst_index, WasmValue(src.i32_const()));
break;
}
break;
case VarState::kRegister:
- switch (src.loc()) {
- case VarState::kStack:
- LoadStackSlot(dst.reg(), src_index);
- break;
- case VarState::kRegister:
- if (dst.reg() != src.reg()) MoveRegister(dst.reg(), src.reg());
- break;
- case VarState::kConstant:
- LoadConstant(dst.reg(), WasmValue(src.i32_const()));
- break;
- }
+ LoadIntoRegister(dst.reg(), src, src_index);
break;
- case VarState::kConstant:
+ case VarState::kI32Const:
DCHECK_EQ(dst, src);
break;
}
}
- void MoveRegister(Register dst, Register src) {
- DCHECK_EQ(0, move_dst_regs & dst.bit());
- move_dst_regs |= dst.bit();
- move_src_regs |= src.bit();
+ void LoadIntoRegister(LiftoffRegister dst,
+ const LiftoffAssembler::VarState& src,
+ uint32_t src_index) {
+ switch (src.loc()) {
+ case VarState::kStack:
+ LoadStackSlot(dst, src_index);
+ break;
+ case VarState::kRegister:
+ DCHECK_EQ(dst.reg_class(), src.reg_class());
+ if (dst != src.reg()) MoveRegister(dst, src.reg());
+ break;
+ case VarState::kI32Const:
+ LoadConstant(dst, WasmValue(src.i32_const()));
+ break;
+ }
+ }
+
+ void MoveRegister(LiftoffRegister dst, LiftoffRegister src) {
+ DCHECK_NE(dst, src);
+ DCHECK(!move_dst_regs.has(dst));
+ move_dst_regs.set(dst);
+ move_src_regs.set(src);
register_moves.emplace_back(dst, src);
}
- void LoadConstant(Register dst, WasmValue value) {
+ void LoadConstant(LiftoffRegister dst, WasmValue value) {
register_loads.emplace_back(dst, value);
}
- void LoadStackSlot(Register dst, uint32_t stack_index) {
+ void LoadStackSlot(LiftoffRegister dst, uint32_t stack_index) {
register_loads.emplace_back(dst, stack_index);
}
@@ -171,8 +178,8 @@ class StackTransferRecipe {
// TODO(clemensh): Avoid unconditionally allocating on the heap.
std::vector<RegisterMove> register_moves;
std::vector<RegisterLoad> register_loads;
- RegList move_dst_regs = 0;
- RegList move_src_regs = 0;
+ LiftoffRegList move_dst_regs;
+ LiftoffRegList move_src_regs;
LiftoffAssembler* const asm_;
};
@@ -199,14 +206,15 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
for (; src_idx < src_end; ++src_idx, ++dst_idx) {
auto& dst = stack_state[dst_idx];
auto& src = source.stack_state[src_idx];
- Register reg = no_reg;
+ // Just initialize to any register; will be overwritten before use.
+ LiftoffRegister reg(Register::from_code<0>());
+ RegClass rc = src.is_reg() ? src.reg_class() : reg_class_for(src.type());
if (src.is_reg() && is_free(src.reg())) {
reg = src.reg();
- } else if (has_unused_register()) {
- reg = unused_register();
+ } else if (has_unused_register(rc)) {
+ reg = unused_register(rc);
} else {
// Make this a stack slot.
- DCHECK(src.is_stack());
dst = VarState(src.type());
continue;
}
@@ -224,20 +232,19 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
if (is_used(src.reg())) {
// Make this a stack slot.
dst = VarState(src.type());
- continue;
+ } else {
+ dst = VarState(src.type(), src.reg());
+ inc_used(src.reg());
}
- dst = VarState(src.type(), src.reg());
- inc_used(src.reg());
} else if (src.is_const()) {
dst = src;
} else {
- // Keep this a stack slot (which is the initial value).
DCHECK(src.is_stack());
- DCHECK(dst.is_stack());
- continue;
+ // Make this a stack slot.
+ dst = VarState(src.type());
}
}
- last_spilled_reg = source.last_spilled_reg;
+ last_spilled_regs = source.last_spilled_regs;
}
void LiftoffAssembler::CacheState::Steal(CacheState& source) {
@@ -250,6 +257,8 @@ void LiftoffAssembler::CacheState::Split(const CacheState& source) {
*this = source;
}
+// TODO(clemensh): Provide a reasonably sized buffer, based on wasm function
+// size.
LiftoffAssembler::LiftoffAssembler(Isolate* isolate)
: TurboAssembler(isolate, nullptr, 0, CodeObjectRequired::kYes) {}
@@ -259,35 +268,48 @@ LiftoffAssembler::~LiftoffAssembler() {
}
}
-Register LiftoffAssembler::GetBinaryOpTargetRegister(
- RegClass rc, PinnedRegisterScope pinned) {
+LiftoffRegister LiftoffAssembler::GetBinaryOpTargetRegister(
+ RegClass rc, LiftoffRegList pinned) {
auto& slot_lhs = *(cache_state_.stack_state.end() - 2);
if (slot_lhs.is_reg() && GetNumUses(slot_lhs.reg()) == 1) {
+ DCHECK_EQ(rc, slot_lhs.reg().reg_class());
return slot_lhs.reg();
}
auto& slot_rhs = *(cache_state_.stack_state.end() - 1);
if (slot_rhs.is_reg() && GetNumUses(slot_rhs.reg()) == 1) {
+ DCHECK_EQ(rc, slot_rhs.reg().reg_class());
return slot_rhs.reg();
}
return GetUnusedRegister(rc, pinned);
}
-Register LiftoffAssembler::PopToRegister(RegClass rc,
- PinnedRegisterScope pinned) {
+LiftoffRegister LiftoffAssembler::GetUnaryOpTargetRegister(
+ RegClass rc, LiftoffRegList pinned) {
+ auto& slot_src = cache_state_.stack_state.back();
+ if (slot_src.is_reg() && GetNumUses(slot_src.reg()) == 1) {
+ DCHECK_EQ(rc, slot_src.reg().reg_class());
+ return slot_src.reg();
+ }
+ return GetUnusedRegister(rc, pinned);
+}
+
+LiftoffRegister LiftoffAssembler::PopToRegister(RegClass rc,
+ LiftoffRegList pinned) {
DCHECK(!cache_state_.stack_state.empty());
VarState slot = cache_state_.stack_state.back();
cache_state_.stack_state.pop_back();
switch (slot.loc()) {
case VarState::kStack: {
- Register reg = GetUnusedRegister(rc, pinned);
+ LiftoffRegister reg = GetUnusedRegister(rc, pinned);
Fill(reg, cache_state_.stack_height());
return reg;
}
case VarState::kRegister:
+ DCHECK_EQ(rc, slot.reg_class());
cache_state_.dec_used(slot.reg());
return slot.reg();
- case VarState::kConstant: {
- Register reg = GetUnusedRegister(rc, pinned);
+ case VarState::kI32Const: {
+ LiftoffRegister reg = GetUnusedRegister(rc, pinned);
LoadConstant(reg, WasmValue(slot.i32_const()));
return reg;
}
@@ -333,7 +355,7 @@ void LiftoffAssembler::Spill(uint32_t index) {
Spill(index, slot.reg());
cache_state_.dec_used(slot.reg());
break;
- case VarState::kConstant:
+ case VarState::kI32Const:
Spill(index, WasmValue(slot.i32_const()));
break;
}
@@ -346,25 +368,112 @@ void LiftoffAssembler::SpillLocals() {
}
}
-Register LiftoffAssembler::SpillOneRegister(RegClass rc,
- PinnedRegisterScope pinned_regs) {
- DCHECK_EQ(kGpReg, rc);
+void LiftoffAssembler::SpillAllRegisters() {
+ for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
+ auto& slot = cache_state_.stack_state[i];
+ if (!slot.is_reg()) continue;
+ Spill(i, slot.reg());
+ slot.MakeStack();
+ }
+ cache_state_.reset_used_registers();
+}
+void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_desc) {
+ uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
+ // Parameter 0 is the wasm context.
+ constexpr size_t kFirstActualParameter = 1;
+ DCHECK_EQ(kFirstActualParameter + num_params, call_desc->ParameterCount());
+
+ // Input 0 is the call target.
+ constexpr size_t kInputShift = 1;
+
+ // Spill all cache slots which are not being used as parameters.
+ // Don't update any register use counters, they will be reset later anyway.
+ for (uint32_t idx = 0, end = cache_state_.stack_height() - num_params;
+ idx < end; ++idx) {
+ VarState& slot = cache_state_.stack_state[idx];
+ if (!slot.is_reg()) continue;
+ Spill(idx, slot.reg());
+ slot.MakeStack();
+ }
+
+ StackTransferRecipe stack_transfers(this);
+
+ // Now move all parameter values into the right slot for the call.
+ // Process parameters backward, such that we can just pop values from the
+ // stack.
+ for (uint32_t i = num_params; i > 0; --i) {
+ uint32_t param = i - 1;
+ ValueType type = sig->GetParam(param);
+ RegClass rc = reg_class_for(type);
+ compiler::LinkageLocation loc = call_desc->GetInputLocation(
+ param + kFirstActualParameter + kInputShift);
+ const VarState& slot = cache_state_.stack_state.back();
+ uint32_t stack_idx = cache_state_.stack_height() - 1;
+ if (loc.IsRegister()) {
+ DCHECK(!loc.IsAnyRegister());
+ int reg_code = loc.AsRegister();
+ LiftoffRegister reg = LiftoffRegister::from_code(rc, reg_code);
+ stack_transfers.LoadIntoRegister(reg, slot, stack_idx);
+ } else {
+ DCHECK(loc.IsCallerFrameSlot());
+ PushCallerFrameSlot(slot, stack_idx);
+ }
+ cache_state_.stack_state.pop_back();
+ }
+
+ // Execute the stack transfers before filling the context register.
+ stack_transfers.Execute();
+
+ // Reset register use counters.
+ cache_state_.reset_used_registers();
+
+ // Fill the wasm context into the right register.
+ compiler::LinkageLocation context_loc =
+ call_desc->GetInputLocation(kInputShift);
+ DCHECK(context_loc.IsRegister() && !context_loc.IsAnyRegister());
+ int context_reg_code = context_loc.AsRegister();
+ LiftoffRegister context_reg(Register::from_code(context_reg_code));
+ FillContextInto(context_reg.gp());
+}
+
+void LiftoffAssembler::FinishCall(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_desc) {
+ size_t return_count = call_desc->ReturnCount();
+ DCHECK_EQ(return_count, sig->return_count());
+ if (return_count != 0) {
+ DCHECK_EQ(1, return_count);
+ compiler::LinkageLocation return_loc = call_desc->GetReturnLocation(0);
+ int return_reg_code = return_loc.AsRegister();
+ ValueType return_type = sig->GetReturn(0);
+ LiftoffRegister return_reg =
+ LiftoffRegister::from_code(reg_class_for(return_type), return_reg_code);
+ DCHECK(!cache_state_.is_used(return_reg));
+ PushRegister(return_type, return_reg);
+ }
+}
+
+LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates,
+ LiftoffRegList pinned) {
// Spill one cached value to free a register.
- Register spill_reg = cache_state_.GetNextSpillReg(pinned_regs);
- int remaining_uses = cache_state_.register_use_count[spill_reg.code()];
+ LiftoffRegister spill_reg = cache_state_.GetNextSpillReg(candidates, pinned);
+ SpillRegister(spill_reg);
+ return spill_reg;
+}
+
+void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
+ int remaining_uses = cache_state_.get_use_count(reg);
DCHECK_LT(0, remaining_uses);
for (uint32_t idx = cache_state_.stack_height() - 1;; --idx) {
DCHECK_GT(cache_state_.stack_height(), idx);
- auto& slot = cache_state_.stack_state[idx];
- if (!slot.is_reg() || slot.reg() != spill_reg) continue;
- Spill(idx, spill_reg);
- slot.MakeStack();
+ auto* slot = &cache_state_.stack_state[idx];
+ if (!slot->is_reg() || slot->reg() != reg) continue;
+ Spill(idx, reg);
+ slot->MakeStack();
if (--remaining_uses == 0) break;
}
- cache_state_.register_use_count[spill_reg.code()] = 0;
- cache_state_.used_registers &= ~spill_reg.bit();
- return spill_reg;
+ cache_state_.clear_used(reg);
}
void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
@@ -378,7 +487,20 @@ void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
}
uint32_t LiftoffAssembler::GetTotalFrameSlotCount() const {
- return kPointerSize * (num_locals() + kMaxValueStackHeight);
+ return num_locals() + kMaxValueStackHeight;
+}
+
+std::ostream& operator<<(std::ostream& os, VarState slot) {
+ os << WasmOpcodes::TypeName(slot.type()) << ":";
+ switch (slot.loc()) {
+ case VarState::kStack:
+ return os << "s";
+ case VarState::kRegister:
+ return os << slot.reg();
+ case VarState::kI32Const:
+ return os << "c" << slot.i32_const();
+ }
+ UNREACHABLE();
}
#undef __
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 55deb593f8..b91f6d7c88 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -5,38 +5,21 @@
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
+#include <iosfwd>
#include <memory>
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
+#include "src/base/bits.h"
#include "src/frames.h"
#include "src/macro-assembler.h"
+#include "src/wasm/baseline/liftoff-assembler-defs.h"
+#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-value.h"
-// Include platform specific definitions.
-#if V8_TARGET_ARCH_IA32
-#include "src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h"
-#elif V8_TARGET_ARCH_X64
-#include "src/wasm/baseline/x64/liftoff-assembler-x64-defs.h"
-#elif V8_TARGET_ARCH_ARM64
-#include "src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h"
-#elif V8_TARGET_ARCH_ARM
-#include "src/wasm/baseline/arm/liftoff-assembler-arm-defs.h"
-#elif V8_TARGET_ARCH_PPC
-#include "src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "src/wasm/baseline/mips/liftoff-assembler-mips-defs.h"
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h"
-#elif V8_TARGET_ARCH_S390
-#include "src/wasm/baseline/s390/liftoff-assembler-s390-defs.h"
-#else
-#error Unsupported architecture.
-#endif
-
namespace v8 {
namespace internal {
namespace wasm {
@@ -44,51 +27,26 @@ namespace wasm {
// Forward declarations.
struct ModuleEnv;
-enum RegClass { kNoReg, kGpReg, kFpReg };
-
-// TODO(clemensh): Switch to a switch once we require C++14 support.
-static constexpr RegClass reg_class_for(ValueType type) {
- return type == kWasmI32 || type == kWasmI64 // int types
- ? kGpReg
- : type == kWasmF32 || type == kWasmF64 // float types
- ? kFpReg
- : kNoReg; // other (unsupported) types
-}
-
class LiftoffAssembler : public TurboAssembler {
public:
// TODO(clemensh): Remove this limitation by allocating more stack space if
// needed.
static constexpr int kMaxValueStackHeight = 8;
- class PinnedRegisterScope {
- public:
- PinnedRegisterScope() : pinned_regs_(0) {}
- explicit PinnedRegisterScope(RegList regs) : pinned_regs_(regs) {}
-
- Register pin(Register reg) {
- pinned_regs_ |= reg.bit();
- return reg;
- }
-
- RegList pinned_regs() const { return pinned_regs_; }
- bool has(Register reg) const { return (pinned_regs_ & reg.bit()) != 0; }
-
- private:
- RegList pinned_regs_ = 0;
- };
- static_assert(IS_TRIVIALLY_COPYABLE(PinnedRegisterScope),
- "PinnedRegisterScope can be passed by value");
+ // Each slot in our stack frame currently has exactly 8 bytes.
+ static constexpr uint32_t kStackSlotSize = 8;
class VarState {
public:
- enum Location : uint8_t { kStack, kRegister, kConstant };
+ enum Location : uint8_t { kStack, kRegister, kI32Const };
explicit VarState(ValueType type) : loc_(kStack), type_(type) {}
- explicit VarState(ValueType type, Register r)
- : loc_(kRegister), type_(type), reg_(r) {}
+ explicit VarState(ValueType type, LiftoffRegister r)
+ : loc_(kRegister), type_(type), reg_(r) {
+ DCHECK_EQ(r.reg_class(), reg_class_for(type));
+ }
explicit VarState(ValueType type, uint32_t i32_const)
- : loc_(kConstant), type_(type), i32_const_(i32_const) {
+ : loc_(kI32Const), type_(type), i32_const_(i32_const) {
DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
}
@@ -99,29 +57,33 @@ class LiftoffAssembler : public TurboAssembler {
return true;
case kRegister:
return reg_ == other.reg_;
- case kConstant:
+ case kI32Const:
return i32_const_ == other.i32_const_;
}
UNREACHABLE();
}
bool is_stack() const { return loc_ == kStack; }
+ bool is_gp_reg() const { return loc_ == kRegister && reg_.is_gp(); }
+ bool is_fp_reg() const { return loc_ == kRegister && reg_.is_fp(); }
bool is_reg() const { return loc_ == kRegister; }
- bool is_const() const { return loc_ == kConstant; }
+ bool is_const() const { return loc_ == kI32Const; }
ValueType type() const { return type_; }
Location loc() const { return loc_; }
uint32_t i32_const() const {
- DCHECK_EQ(loc_, kConstant);
+ DCHECK_EQ(loc_, kI32Const);
return i32_const_;
}
-
- Register reg() const {
+ Register gp_reg() const { return reg().gp(); }
+ DoubleRegister fp_reg() const { return reg().fp(); }
+ LiftoffRegister reg() const {
DCHECK_EQ(loc_, kRegister);
return reg_;
}
+ RegClass reg_class() const { return reg().reg_class(); }
void MakeStack() { loc_ = kStack; }
@@ -132,10 +94,11 @@ class LiftoffAssembler : public TurboAssembler {
ValueType type_;
union {
- Register reg_; // used if loc_ == kRegister
- uint32_t i32_const_; // used if loc_ == kConstant
+ LiftoffRegister reg_; // used if loc_ == kRegister
+ uint32_t i32_const_; // used if loc_ == kI32Const
};
};
+
static_assert(IS_TRIVIALLY_COPYABLE(VarState),
"VarState should be trivially copyable");
@@ -147,80 +110,102 @@ class LiftoffAssembler : public TurboAssembler {
// TODO(clemensh): Improve memory management here; avoid std::vector.
std::vector<VarState> stack_state;
- RegList used_registers = 0;
- // TODO(clemensh): Replace this by CountLeadingZeros(kGpCacheRegs) once that
- // method is constexpr.
- static constexpr int kMaxRegisterCode = 7;
- uint32_t register_use_count[kMaxRegisterCode + 1] = {0};
+ LiftoffRegList used_registers;
+ uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
+ LiftoffRegList last_spilled_regs;
// TODO(clemensh): Remove stack_base; use ControlBase::stack_depth.
uint32_t stack_base = 0;
- Register last_spilled_reg = Register::from_code<0>();
-
- // InitMerge: Initialize this CacheState from the {source} cache state, but
- // make sure that other code paths can still jump here (i.e. avoid constants
- // in the locals or the merge region as specified by {arity}).
- // TODO(clemensh): Don't copy the full parent state (this makes us N^2).
- void InitMerge(const CacheState& source, uint32_t num_locals,
- uint32_t arity);
- void Steal(CacheState& source);
+ bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const {
+ DCHECK(rc == kGpReg || rc == kFpReg);
+ LiftoffRegList candidates = GetCacheRegList(rc);
+ return has_unused_register(candidates, pinned);
+ }
- void Split(const CacheState& source);
+ bool has_unused_register(LiftoffRegList candidates,
+ LiftoffRegList pinned = {}) const {
+ LiftoffRegList available_regs = candidates & ~used_registers & ~pinned;
+ return !available_regs.is_empty();
+ }
- bool has_unused_register(PinnedRegisterScope pinned_scope = {}) const {
- RegList available_regs =
- kGpCacheRegs & ~used_registers & ~pinned_scope.pinned_regs();
- return available_regs != 0;
+ LiftoffRegister unused_register(RegClass rc,
+ LiftoffRegList pinned = {}) const {
+ DCHECK(rc == kGpReg || rc == kFpReg);
+ LiftoffRegList candidates = GetCacheRegList(rc);
+      return unused_register(candidates, pinned);
}
- Register unused_register(PinnedRegisterScope pinned_scope = {}) const {
- RegList available_regs =
- kGpCacheRegs & ~used_registers & ~pinned_scope.pinned_regs();
- Register reg =
- Register::from_code(base::bits::CountTrailingZeros(available_regs));
- DCHECK_EQ(0, used_registers & reg.bit());
- return reg;
+ LiftoffRegister unused_register(LiftoffRegList candidates,
+ LiftoffRegList pinned = {}) const {
+ LiftoffRegList available_regs = candidates & ~used_registers & ~pinned;
+ return available_regs.GetFirstRegSet();
}
- void inc_used(Register reg) {
- used_registers |= reg.bit();
- DCHECK_GE(kMaxRegisterCode, reg.code());
- ++register_use_count[reg.code()];
+ void inc_used(LiftoffRegister reg) {
+ used_registers.set(reg);
+ DCHECK_GT(kMaxInt, register_use_count[reg.liftoff_code()]);
+ ++register_use_count[reg.liftoff_code()];
}
// Returns whether this was the last use.
- bool dec_used(Register reg) {
+ bool dec_used(LiftoffRegister reg) {
DCHECK(is_used(reg));
- DCHECK_GE(kMaxRegisterCode, reg.code());
- if (--register_use_count[reg.code()] == 0) {
- used_registers &= ~reg.bit();
- return true;
- }
- return false;
+ int code = reg.liftoff_code();
+ DCHECK_LT(0, register_use_count[code]);
+ if (--register_use_count[code] != 0) return false;
+ used_registers.clear(reg);
+ return true;
}
- bool is_used(Register reg) const {
- DCHECK_GE(kMaxRegisterCode, reg.code());
- bool used = used_registers & reg.bit();
- DCHECK_EQ(used, register_use_count[reg.code()] != 0);
+ bool is_used(LiftoffRegister reg) const {
+ bool used = used_registers.has(reg);
+ DCHECK_EQ(used, register_use_count[reg.liftoff_code()] != 0);
return used;
}
- bool is_free(Register reg) const { return !is_used(reg); }
+ uint32_t get_use_count(LiftoffRegister reg) const {
+ DCHECK_GT(arraysize(register_use_count), reg.liftoff_code());
+ return register_use_count[reg.liftoff_code()];
+ }
+
+ void clear_used(LiftoffRegister reg) {
+ register_use_count[reg.liftoff_code()] = 0;
+ used_registers.clear(reg);
+ }
- uint32_t stack_height() const {
- return static_cast<uint32_t>(stack_state.size());
+ bool is_free(LiftoffRegister reg) const { return !is_used(reg); }
+
+ void reset_used_registers() {
+ used_registers = {};
+ memset(register_use_count, 0, sizeof(register_use_count));
+ }
+
+ LiftoffRegister GetNextSpillReg(LiftoffRegList candidates,
+ LiftoffRegList pinned = {}) {
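+      // Rotate through the candidate registers: prefer one that has not been
+      // spilled recently, and reset {last_spilled_regs} once every candidate
+      // has had its turn.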
+ LiftoffRegList unpinned = candidates.MaskOut(pinned);
+ DCHECK(!unpinned.is_empty());
+ // This method should only be called if none of the candidates is free.
+ DCHECK(unpinned.MaskOut(used_registers).is_empty());
+ LiftoffRegList unspilled = unpinned.MaskOut(last_spilled_regs);
+ if (unspilled.is_empty()) {
+ unspilled = unpinned;
+ last_spilled_regs = {};
+ }
+ LiftoffRegister reg = unspilled.GetFirstRegSet();
+ last_spilled_regs.set(reg);
+ return reg;
}
- Register GetNextSpillReg(PinnedRegisterScope scope = {}) {
- uint32_t mask = (1u << (last_spilled_reg.code() + 1)) - 1;
- RegList unpinned_regs = kGpCacheRegs & ~scope.pinned_regs();
- DCHECK_NE(0, unpinned_regs);
- RegList remaining_regs = unpinned_regs & ~mask;
- if (!remaining_regs) remaining_regs = unpinned_regs;
- last_spilled_reg =
- Register::from_code(base::bits::CountTrailingZeros(remaining_regs));
- return last_spilled_reg;
+ // TODO(clemensh): Don't copy the full parent state (this makes us N^2).
+ void InitMerge(const CacheState& source, uint32_t num_locals,
+ uint32_t arity);
+
+ void Steal(CacheState& source);
+
+ void Split(const CacheState& source);
+
+ uint32_t stack_height() const {
+ return static_cast<uint32_t>(stack_state.size());
}
private:
@@ -233,27 +218,39 @@ class LiftoffAssembler : public TurboAssembler {
explicit LiftoffAssembler(Isolate* isolate);
~LiftoffAssembler();
- Register GetBinaryOpTargetRegister(RegClass, PinnedRegisterScope = {});
+ LiftoffRegister GetBinaryOpTargetRegister(RegClass,
+ LiftoffRegList pinned = {});
+ LiftoffRegister GetUnaryOpTargetRegister(RegClass,
+ LiftoffRegList pinned = {});
- Register PopToRegister(RegClass, PinnedRegisterScope = {});
+ LiftoffRegister PopToRegister(RegClass, LiftoffRegList pinned = {});
- void PushRegister(ValueType type, Register reg) {
+ void PushRegister(ValueType type, LiftoffRegister reg) {
+ DCHECK_EQ(reg_class_for(type), reg.reg_class());
cache_state_.inc_used(reg);
cache_state_.stack_state.emplace_back(type, reg);
}
- uint32_t GetNumUses(Register reg) const {
- DCHECK_GE(CacheState::kMaxRegisterCode, reg.code());
- return cache_state_.register_use_count[reg.code()];
+ void SpillRegister(LiftoffRegister);
+
+ uint32_t GetNumUses(LiftoffRegister reg) {
+ return cache_state_.get_use_count(reg);
}
- Register GetUnusedRegister(RegClass rc,
- PinnedRegisterScope pinned_regs = {}) {
- DCHECK_EQ(kGpReg, rc);
- if (cache_state_.has_unused_register(pinned_regs)) {
- return cache_state_.unused_register(pinned_regs);
+ // Get an unused register for class {rc}, potentially spilling to free one.
+ LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
+ DCHECK(rc == kGpReg || rc == kFpReg);
+ LiftoffRegList candidates = GetCacheRegList(rc);
+ return GetUnusedRegister(candidates, pinned);
+ }
+
+ // Get an unused register of {candidates}, potentially spilling to free one.
+ LiftoffRegister GetUnusedRegister(LiftoffRegList candidates,
+ LiftoffRegList pinned = {}) {
+ if (cache_state_.has_unused_register(candidates, pinned)) {
+ return cache_state_.unused_register(candidates, pinned);
}
- return SpillOneRegister(rc, pinned_regs);
+ return SpillOneRegister(candidates, pinned);
}
void DropStackSlot(VarState* slot) {
@@ -271,40 +268,102 @@ class LiftoffAssembler : public TurboAssembler {
void Spill(uint32_t index);
void SpillLocals();
+ void SpillAllRegisters();
+
+ // Load parameters into the right registers / stack slots for the call.
+ void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*);
+ // Process return values of the call.
+ void FinishCall(wasm::FunctionSig*, compiler::CallDescriptor*);
////////////////////////////////////
// Platform-specific part. //
////////////////////////////////////
- inline void ReserveStackSpace(uint32_t);
+ inline void ReserveStackSpace(uint32_t bytes);
- inline void LoadConstant(Register, WasmValue);
+ inline void LoadConstant(LiftoffRegister, WasmValue);
inline void LoadFromContext(Register dst, uint32_t offset, int size);
inline void SpillContext(Register context);
- inline void Load(Register dst, Register src_addr, uint32_t offset_imm,
- int size, PinnedRegisterScope = {});
- inline void Store(Register dst_addr, uint32_t offset_imm, Register src,
- int size, PinnedRegisterScope = {});
- inline void LoadCallerFrameSlot(Register, uint32_t caller_slot_idx);
+ inline void FillContextInto(Register dst);
+ inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
+ uint32_t offset_imm, LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc = nullptr);
+ inline void Store(Register dst_addr, Register offset_reg, uint32_t offset_imm,
+ LiftoffRegister src, StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc = nullptr);
+ inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx);
inline void MoveStackValue(uint32_t dst_index, uint32_t src_index);
- inline void MoveToReturnRegister(Register);
+ inline void MoveToReturnRegister(LiftoffRegister);
+ // TODO(clemensh): Pass the type to {Move}, to emit more efficient code.
+ inline void Move(LiftoffRegister dst, LiftoffRegister src);
- inline void Spill(uint32_t index, Register);
+ inline void Spill(uint32_t index, LiftoffRegister);
inline void Spill(uint32_t index, WasmValue);
- inline void Fill(Register, uint32_t index);
+ inline void Fill(LiftoffRegister, uint32_t index);
+ // i32 binops.
inline void emit_i32_add(Register dst, Register lhs, Register rhs);
inline void emit_i32_sub(Register dst, Register lhs, Register rhs);
inline void emit_i32_mul(Register dst, Register lhs, Register rhs);
inline void emit_i32_and(Register dst, Register lhs, Register rhs);
inline void emit_i32_or(Register dst, Register lhs, Register rhs);
inline void emit_i32_xor(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_shl(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_sar(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_shr(Register dst, Register lhs, Register rhs);
+
+ // i32 unops.
+ inline bool emit_i32_eqz(Register dst, Register src);
+ inline bool emit_i32_clz(Register dst, Register src);
+ inline bool emit_i32_ctz(Register dst, Register src);
+ inline bool emit_i32_popcnt(Register dst, Register src);
+
+ inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs);
+
+ inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs);
+ inline void emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs);
+ inline void emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs);
+
+ inline void emit_i32_test(Register);
+ inline void emit_i32_compare(Register, Register);
+ inline void emit_jump(Label*);
+ inline void emit_cond_jump(Condition, Label*);
- inline void JumpIfZero(Register, Label*);
+ inline void StackCheck(Label* ool_code);
- // Platform-specific constant.
- static constexpr RegList kGpCacheRegs = kLiftoffAssemblerGpCacheRegs;
+ inline void CallTrapCallbackForTesting();
+
+ inline void AssertUnreachable(AbortReason reason);
+
+ // Push a value to the stack (will become a caller frame slot).
+ inline void PushCallerFrameSlot(const VarState& src, uint32_t src_index);
+ inline void PushCallerFrameSlot(LiftoffRegister reg);
+ inline void PushRegisters(LiftoffRegList);
+ inline void PopRegisters(LiftoffRegList);
+
+ inline void DropStackSlotsAndRet(uint32_t num_stack_slots);
+
+ // Push arguments on the stack (in the caller frame), then align the stack.
+ // The address of the last argument will be stored to {arg_addr_dst}. Previous
+ // arguments will be located at pointer-sized buckets above that address.
+ inline void PrepareCCall(uint32_t num_params, const Register* args);
+ inline void SetCCallRegParamAddr(Register dst, uint32_t param_idx,
+ uint32_t num_params);
+ inline void SetCCallStackParamAddr(uint32_t stack_param_idx,
+ uint32_t param_idx, uint32_t num_params);
+ inline void CallC(ExternalReference ext_ref, uint32_t num_params);
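+ // Illustrative note (not part of the original patch): a C call is driven by
+ // PrepareCCall(num_params, args), one SetCCallRegParamAddr /
+ // SetCCallStackParamAddr per parameter, and finally CallC(ext_ref,
+ // num_params); GenerateCCall in liftoff-compiler.cc is the concrete caller.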
+
+ inline void CallNativeWasmCode(Address addr);
+
+ inline void CallRuntime(Zone* zone, Runtime::FunctionId fid);
+
+ // Reserve space in the current frame; store the address of that space in {addr}.
+ inline void AllocateStackSlot(Register addr, uint32_t size);
+ inline void DeallocateStackSlot(uint32_t size);
////////////////////////////////////
// End of platform-specific part. //
@@ -314,7 +373,6 @@ class LiftoffAssembler : public TurboAssembler {
void set_num_locals(uint32_t num_locals);
uint32_t GetTotalFrameSlotCount() const;
- size_t GetSafepointTableOffset() const { return 0; }
ValueType local_type(uint32_t index) {
DCHECK_GT(num_locals_, index);
@@ -332,12 +390,7 @@ class LiftoffAssembler : public TurboAssembler {
CacheState* cache_state() { return &cache_state_; }
private:
- static_assert(
- base::bits::CountPopulation(kGpCacheRegs) >= 2,
- "We need at least two cache registers to execute binary operations");
-
uint32_t num_locals_ = 0;
- uint32_t stack_space_ = 0;
static constexpr uint32_t kInlineLocalTypes = 8;
union {
ValueType local_types_[kInlineLocalTypes];
@@ -347,9 +400,12 @@ class LiftoffAssembler : public TurboAssembler {
"Reconsider this inlining if ValueType gets bigger");
CacheState cache_state_;
- Register SpillOneRegister(RegClass, PinnedRegisterScope = {});
+ LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
+ LiftoffRegList pinned);
};
+std::ostream& operator<<(std::ostream& os, LiftoffAssembler::VarState);
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index a0aea7503a..255ee0347e 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -5,11 +5,13 @@
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/assembler-inl.h"
+#include "src/base/optional.h"
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
#include "src/counters.h"
#include "src/macro-assembler-inl.h"
#include "src/wasm/function-body-decoder-impl.h"
+#include "src/wasm/memory-tracing.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-opcodes.h"
@@ -18,7 +20,7 @@ namespace internal {
namespace wasm {
constexpr auto kRegister = LiftoffAssembler::VarState::kRegister;
-constexpr auto kConstant = LiftoffAssembler::VarState::kConstant;
+constexpr auto kI32Const = LiftoffAssembler::VarState::kI32Const;
constexpr auto kStack = LiftoffAssembler::VarState::kStack;
namespace {
@@ -37,9 +39,13 @@ namespace {
class MovableLabel {
public:
Label* get() { return label_.get(); }
+ MovableLabel() : MovableLabel(new Label()) {}
+
+ static MovableLabel None() { return MovableLabel(nullptr); }
private:
- std::unique_ptr<Label> label_ = base::make_unique<Label>();
+ std::unique_ptr<Label> label_;
+ explicit MovableLabel(Label* label) : label_(label) {}
};
#else
// On all other platforms, just store the Label directly.
@@ -47,6 +53,8 @@ class MovableLabel {
public:
Label* get() { return &label_; }
+ static MovableLabel None() { return MovableLabel(); }
+
private:
Label label_;
};
@@ -62,18 +70,68 @@ class LiftoffCompiler {
using Value = ValueBase;
+ struct ElseState {
+ MovableLabel label;
+ LiftoffAssembler::CacheState state;
+ };
+
struct Control : public ControlWithNamedConstructors<Control, Value> {
MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(Control);
+ std::unique_ptr<ElseState> else_state;
LiftoffAssembler::CacheState label_state;
MovableLabel label;
};
using Decoder = WasmFullDecoder<validate, LiftoffCompiler>;
+ struct OutOfLineCode {
+ MovableLabel label;
+ MovableLabel continuation;
+ Builtins::Name builtin;
+ wasm::WasmCodePosition position;
+ LiftoffRegList regs_to_save;
+ uint32_t pc; // for trap handler.
+
+ // Named constructors:
+ static OutOfLineCode Trap(Builtins::Name b, wasm::WasmCodePosition pos,
+ uint32_t pc) {
+ return {{}, {}, b, pos, {}, pc};
+ }
+ static OutOfLineCode StackCheck(wasm::WasmCodePosition pos,
+ LiftoffRegList regs) {
+ return {{}, MovableLabel::None(), Builtins::kWasmStackGuard, pos, regs,
+ 0};
+ }
+ };
+
LiftoffCompiler(LiftoffAssembler* liftoff_asm,
- compiler::CallDescriptor* call_desc, compiler::ModuleEnv* env)
- : asm_(liftoff_asm), call_desc_(call_desc), env_(env) {}
+ compiler::CallDescriptor* call_desc, compiler::ModuleEnv* env,
+ compiler::RuntimeExceptionSupport runtime_exception_support,
+ SourcePositionTableBuilder* source_position_table_builder,
+ std::vector<trap_handler::ProtectedInstructionData>*
+ protected_instructions,
+ Zone* compilation_zone, std::unique_ptr<Zone>* codegen_zone)
+ : asm_(liftoff_asm),
+ call_desc_(call_desc),
+ env_(env),
+ min_size_(env_->module->initial_pages * wasm::kWasmPageSize),
+ max_size_((env_->module->has_maximum_pages
+ ? env_->module->maximum_pages
+ : wasm::kV8MaxWasmMemoryPages) *
+ wasm::kWasmPageSize),
+ runtime_exception_support_(runtime_exception_support),
+ source_position_table_builder_(source_position_table_builder),
+ protected_instructions_(protected_instructions),
+ compilation_zone_(compilation_zone),
+ codegen_zone_(codegen_zone),
+ safepoint_table_builder_(compilation_zone_) {
+ // Check for overflow in max_size_.
+ DCHECK_EQ(max_size_, uint64_t{env_->module->has_maximum_pages
+ ? env_->module->maximum_pages
+ : wasm::kV8MaxWasmMemoryPages} *
+ wasm::kWasmPageSize);
+ }
bool ok() const { return ok_; }
@@ -84,16 +142,27 @@ class LiftoffCompiler {
BindUnboundLabels(decoder);
}
+ int GetSafepointTableOffset() const {
+ return safepoint_table_builder_.GetCodeOffset();
+ }
+
void BindUnboundLabels(Decoder* decoder) {
-#ifndef DEBUG
- return;
-#endif
+#ifdef DEBUG
// Bind all labels now, otherwise their destructor will fire a DCHECK error
// if they were referenced before.
for (uint32_t i = 0, e = decoder->control_depth(); i < e; ++i) {
- Label* label = decoder->control_at(i)->label.get();
+ Control* c = decoder->control_at(i);
+ Label* label = c->label.get();
if (!label->is_bound()) __ bind(label);
+ if (c->else_state) {
+ Label* else_label = c->else_state->label.get();
+ if (!else_label->is_bound()) __ bind(else_label);
+ }
+ }
+ for (auto& ool : out_of_line_code_) {
+ if (!ool.label.get()->is_bound()) __ bind(ool.label.get());
}
+#endif
}
void CheckStackSizeLimit(Decoder* decoder) {
@@ -112,21 +181,75 @@ class LiftoffCompiler {
}
}
+ void ProcessParameter(uint32_t param_idx, uint32_t input_location) {
+ ValueType type = __ local_type(param_idx);
+ RegClass rc = reg_class_for(type);
+ compiler::LinkageLocation param_loc =
+ call_desc_->GetInputLocation(input_location);
+ if (param_loc.IsRegister()) {
+ DCHECK(!param_loc.IsAnyRegister());
+ int reg_code = param_loc.AsRegister();
+ LiftoffRegister reg =
+ rc == kGpReg ? LiftoffRegister(Register::from_code(reg_code))
+ : LiftoffRegister(DoubleRegister::from_code(reg_code));
+ LiftoffRegList cache_regs =
+ rc == kGpReg ? kGpCacheRegList : kFpCacheRegList;
+ if (cache_regs.has(reg)) {
+ // This is a cache register, just use it.
+ __ PushRegister(type, reg);
+ return;
+ }
+ // Move to a cache register.
+ LiftoffRegister cache_reg = __ GetUnusedRegister(rc);
+ __ Move(cache_reg, reg);
+ __ PushRegister(type, cache_reg);
+ return;
+ }
+ if (param_loc.IsCallerFrameSlot()) {
+ LiftoffRegister tmp_reg = __ GetUnusedRegister(rc);
+ __ LoadCallerFrameSlot(tmp_reg, -param_loc.AsCallerFrameSlot());
+ __ PushRegister(type, tmp_reg);
+ return;
+ }
+ UNREACHABLE();
+ }
+
+ void StackCheck(wasm::WasmCodePosition position) {
+ if (FLAG_wasm_no_stack_checks || !runtime_exception_support_) return;
+ out_of_line_code_.push_back(
+ OutOfLineCode::StackCheck(position, __ cache_state()->used_registers));
+ OutOfLineCode& ool = out_of_line_code_.back();
+ __ StackCheck(ool.label.get());
+ __ bind(ool.continuation.get());
+ }
+
void StartFunctionBody(Decoder* decoder, Control* block) {
if (!kLiftoffAssemblerImplementedOnThisPlatform) {
unsupported(decoder, "platform");
return;
}
__ EnterFrame(StackFrame::WASM_COMPILED);
- __ ReserveStackSpace(__ GetTotalFrameSlotCount());
+ __ set_has_frame(true);
+ __ ReserveStackSpace(LiftoffAssembler::kStackSlotSize *
+ __ GetTotalFrameSlotCount());
// Parameter 0 is the wasm context.
uint32_t num_params =
static_cast<uint32_t>(call_desc_->ParameterCount()) - 1;
for (uint32_t i = 0; i < __ num_locals(); ++i) {
- // We can currently only handle i32 parameters and locals.
- if (__ local_type(i) != kWasmI32) {
- unsupported(decoder, "non-i32 param/local");
- return;
+ switch (__ local_type(i)) {
+ case kWasmI32:
+ case kWasmF32:
+ // supported.
+ break;
+ case kWasmI64:
+ unsupported(decoder, "i64 param/local");
+ return;
+ case kWasmF64:
+ unsupported(decoder, "f64 param/local");
+ return;
+ default:
+ unsupported(decoder, "exotic param/local");
+ return;
}
}
// Input 0 is the call target, the context is at 1.
@@ -140,87 +263,153 @@ class LiftoffCompiler {
__ SpillContext(context_reg);
uint32_t param_idx = 0;
for (; param_idx < num_params; ++param_idx) {
- constexpr uint32_t kFirstActualParamIndex = kContextParameterIndex + 1;
- ValueType type = __ local_type(param_idx);
- compiler::LinkageLocation param_loc =
- call_desc_->GetInputLocation(param_idx + kFirstActualParamIndex);
- if (param_loc.IsRegister()) {
- DCHECK(!param_loc.IsAnyRegister());
- Register param_reg = Register::from_code(param_loc.AsRegister());
- if (param_reg.bit() & __ kGpCacheRegs) {
- // This is a cache register, just use it.
- __ PushRegister(type, param_reg);
- } else {
- // No cache register. Push to the stack.
- __ Spill(param_idx, param_reg);
- __ cache_state()->stack_state.emplace_back(type);
- }
- } else if (param_loc.IsCallerFrameSlot()) {
- Register tmp_reg = __ GetUnusedRegister(reg_class_for(type));
- __ LoadCallerFrameSlot(tmp_reg, -param_loc.AsCallerFrameSlot());
- __ PushRegister(type, tmp_reg);
- } else {
- UNIMPLEMENTED();
- }
+ constexpr int kFirstActualParameterIndex = kContextParameterIndex + 1;
+ ProcessParameter(param_idx, param_idx + kFirstActualParameterIndex);
}
+ // Set to a gp register to mark it as uninitialized; it is lazily replaced below by an fp register holding 0.0.
+ LiftoffRegister zero_double_reg(Register::from_code<0>());
+ DCHECK(zero_double_reg.is_gp());
for (; param_idx < __ num_locals(); ++param_idx) {
ValueType type = decoder->GetLocalType(param_idx);
switch (type) {
case kWasmI32:
__ cache_state()->stack_state.emplace_back(kWasmI32, uint32_t{0});
break;
+ case kWasmF32:
+ if (zero_double_reg.is_gp()) {
+ // Note: This might spill one of the registers used to hold
+ // parameters.
+ zero_double_reg = __ GetUnusedRegister(kFpReg);
+ __ LoadConstant(zero_double_reg, WasmValue(0.f));
+ }
+ __ PushRegister(kWasmF32, zero_double_reg);
+ break;
default:
UNIMPLEMENTED();
}
}
block->label_state.stack_base = __ num_locals();
+
+ // The function-prologue stack check is associated with position 0, which
+ // is never a position of any instruction in the function.
+ StackCheck(0);
+
DCHECK_EQ(__ num_locals(), param_idx);
DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());
CheckStackSizeLimit(decoder);
}
- void FinishFunction(Decoder* decoder) {}
+ void GenerateOutOfLineCode(OutOfLineCode& ool) {
+ __ bind(ool.label.get());
+ const bool is_stack_check = ool.builtin == Builtins::kWasmStackGuard;
+ if (!runtime_exception_support_) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ // In this mode, we never generate stack checks.
+ DCHECK(!is_stack_check);
+ __ CallTrapCallbackForTesting();
+ __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ Ret();
+ return;
+ }
+
+ if (!is_stack_check && env_->use_trap_handler) {
+ uint32_t pc = static_cast<uint32_t>(__ pc_offset());
+ DCHECK_EQ(pc, __ pc_offset());
+ protected_instructions_->emplace_back(
+ trap_handler::ProtectedInstructionData{ool.pc, pc});
+ }
+
+ if (!ool.regs_to_save.is_empty()) __ PushRegisters(ool.regs_to_save);
+
+ source_position_table_builder_->AddPosition(
+ __ pc_offset(), SourcePosition(ool.position), false);
+ __ Call(__ isolate()->builtins()->builtin_handle(ool.builtin),
+ RelocInfo::CODE_TARGET);
+ safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ DCHECK_EQ(ool.continuation.get()->is_bound(), is_stack_check);
+ if (!ool.regs_to_save.is_empty()) __ PopRegisters(ool.regs_to_save);
+ if (is_stack_check) {
+ __ emit_jump(ool.continuation.get());
+ } else {
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
+ }
+ }
+
+ void FinishFunction(Decoder* decoder) {
+ for (OutOfLineCode& ool : out_of_line_code_) {
+ GenerateOutOfLineCode(ool);
+ }
+ safepoint_table_builder_.Emit(asm_, __ GetTotalFrameSlotCount());
+ }
void OnFirstError(Decoder* decoder) {
ok_ = false;
BindUnboundLabels(decoder);
}
- void Block(Decoder* decoder, Control* new_block) {
- // Note: This is called for blocks and loops.
- DCHECK_EQ(new_block, decoder->control_at(0));
+ void NextInstruction(Decoder* decoder, WasmOpcode) {
+ TraceCacheState(decoder);
+ }
+
+ void Block(Decoder* decoder, Control* block) {
+ block->label_state.stack_base = __ cache_state()->stack_height();
+ }
- new_block->label_state.stack_base = __ cache_state()->stack_height();
+ void Loop(Decoder* decoder, Control* loop) {
+ loop->label_state.stack_base = __ cache_state()->stack_height();
- if (new_block->is_loop()) {
- // Before entering a loop, spill all locals to the stack, in order to free
- // the cache registers, and to avoid unnecessarily reloading stack values
- // into registers at branches.
- // TODO(clemensh): Come up with a better strategy here, involving
- // pre-analysis of the function.
- __ SpillLocals();
+ // Before entering a loop, spill all locals to the stack, in order to free
+ // the cache registers, and to avoid unnecessarily reloading stack values
+ // into registers at branches.
+ // TODO(clemensh): Come up with a better strategy here, involving
+ // pre-analysis of the function.
+ __ SpillLocals();
- // Loop labels bind at the beginning of the block, block labels at the
- // end.
- __ bind(new_block->label.get());
+ // Loop labels bind at the beginning of the block.
+ __ bind(loop->label.get());
- new_block->label_state.Split(*__ cache_state());
- }
- }
+ // Save the current cache state for the merge when jumping to this loop.
+ loop->label_state.Split(*__ cache_state());
- void Loop(Decoder* decoder, Control* block) { Block(decoder, block); }
+ // Execute a stack check in the loop header.
+ StackCheck(decoder->position());
+ }
void Try(Decoder* decoder, Control* block) { unsupported(decoder, "try"); }
+
void If(Decoder* decoder, const Value& cond, Control* if_block) {
- unsupported(decoder, "if");
+ DCHECK_EQ(if_block, decoder->control_at(0));
+ DCHECK(if_block->is_if());
+
+ if (if_block->start_merge.arity > 0 || if_block->end_merge.arity > 1)
+ return unsupported(decoder, "multi-value if");
+
+ // Allocate the else state.
+ if_block->else_state = base::make_unique<ElseState>();
+
+ // Test the condition, jump to else if zero.
+ Register value = __ PopToRegister(kGpReg).gp();
+ __ emit_i32_test(value);
+ __ emit_cond_jump(kEqual, if_block->else_state->label.get());
+
+ if_block->label_state.stack_base = __ cache_state()->stack_height();
+ // Store the state (after popping the value) for executing the else branch.
+ if_block->else_state->state.Split(*__ cache_state());
}
void FallThruTo(Decoder* decoder, Control* c) {
if (c->end_merge.reached) {
__ MergeFullStackWith(c->label_state);
+ } else if (c->is_onearmed_if()) {
+ c->label_state.InitMerge(*__ cache_state(), __ num_locals(),
+ c->br_merge()->arity);
+ __ MergeFullStackWith(c->label_state);
} else {
c->label_state.Split(*__ cache_state());
}
+ TraceCacheState(decoder);
}
void PopControl(Decoder* decoder, Control* c) {
@@ -234,36 +423,148 @@ class LiftoffCompiler {
void EndControl(Decoder* decoder, Control* c) {}
+ void GenerateCCall(Register res_reg, uint32_t num_args,
+ const Register* arg_regs, ExternalReference ext_ref) {
+ static constexpr int kNumReturns = 1;
+ static constexpr int kMaxArgs = 2;
+ static constexpr MachineType kReps[]{
+ MachineType::Uint32(), MachineType::Pointer(), MachineType::Pointer()};
+ static_assert(arraysize(kReps) == kNumReturns + kMaxArgs, "mismatch");
+ DCHECK_LE(num_args, kMaxArgs);
+
+ MachineSignature sig(kNumReturns, num_args, kReps);
+ compiler::CallDescriptor* desc =
+ compiler::Linkage::GetSimplifiedCDescriptor(compilation_zone_, &sig);
+
+ // Before making a call, spill all cache registers.
+ __ SpillAllRegisters();
+
+ // Store arguments on our stack, then align the stack for calling to C.
+ uint32_t num_params = static_cast<uint32_t>(desc->ParameterCount());
+ __ PrepareCCall(num_params, arg_regs);
+
+ // Set parameters (in sp[0], sp[8], ...).
+ uint32_t num_stack_params = 0;
+ for (uint32_t param = 0; param < num_params; ++param) {
+ constexpr size_t kInputShift = 1; // Input 0 is the call target.
+
+ compiler::LinkageLocation loc =
+ desc->GetInputLocation(param + kInputShift);
+ if (loc.IsRegister()) {
+ Register reg = Register::from_code(loc.AsRegister());
+ // Load address of that parameter to the register.
+ __ SetCCallRegParamAddr(reg, param, num_params);
+ } else {
+ DCHECK(loc.IsCallerFrameSlot());
+ __ SetCCallStackParamAddr(num_stack_params, param, num_params);
+ ++num_stack_params;
+ }
+ }
+
+ // Now execute the call.
+ __ CallC(ext_ref, num_params);
+
+ // Load return value.
+ compiler::LinkageLocation return_loc = desc->GetReturnLocation(0);
+ DCHECK(return_loc.IsRegister());
+ Register return_reg = Register::from_code(return_loc.AsRegister());
+ if (return_reg != res_reg) {
+ __ Move(LiftoffRegister(res_reg), LiftoffRegister(return_reg));
+ }
+ }
+
+ void I32UnOp(bool (LiftoffAssembler::*emit_fn)(Register, Register),
+ ExternalReference (*fallback_fn)(Isolate*)) {
+ LiftoffRegList pinned;
+ LiftoffRegister dst_reg = pinned.set(__ GetUnaryOpTargetRegister(kGpReg));
+ LiftoffRegister src_reg = pinned.set(__ PopToRegister(kGpReg, pinned));
+ if (!emit_fn || !(asm_->*emit_fn)(dst_reg.gp(), src_reg.gp())) {
+ ExternalReference ext_ref = fallback_fn(asm_->isolate());
+ Register args[] = {src_reg.gp()};
+ GenerateCCall(dst_reg.gp(), arraysize(args), args, ext_ref);
+ }
+ __ PushRegister(kWasmI32, dst_reg);
+ }
+
void UnOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& value, Value* result) {
- unsupported(decoder, "unary operation");
+#define CASE_UNOP(opcode, type, fn, ext_ref_fn) \
+ case WasmOpcode::kExpr##opcode: \
+ type##UnOp(&LiftoffAssembler::emit_##fn, ext_ref_fn); \
+ break;
+ switch (opcode) {
+ CASE_UNOP(I32Eqz, I32, i32_eqz, nullptr)
+ CASE_UNOP(I32Clz, I32, i32_clz, nullptr)
+ CASE_UNOP(I32Ctz, I32, i32_ctz, nullptr)
+ CASE_UNOP(I32Popcnt, I32, i32_popcnt,
+ &ExternalReference::wasm_word32_popcnt)
+ default:
+ return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
+ }
+#undef CASE_UNOP
+ }
+
+ void I32BinOp(void (LiftoffAssembler::*emit_fn)(Register, Register,
+ Register)) {
+ LiftoffRegList pinned;
+ LiftoffRegister dst_reg = pinned.set(__ GetBinaryOpTargetRegister(kGpReg));
+ LiftoffRegister rhs_reg = pinned.set(__ PopToRegister(kGpReg, pinned));
+ LiftoffRegister lhs_reg = __ PopToRegister(kGpReg, pinned);
+ (asm_->*emit_fn)(dst_reg.gp(), lhs_reg.gp(), rhs_reg.gp());
+ __ PushRegister(kWasmI32, dst_reg);
+ }
+
+ void I32CCallBinOp(ExternalReference ext_ref) {
+ LiftoffRegList pinned;
+ LiftoffRegister dst_reg = pinned.set(__ GetBinaryOpTargetRegister(kGpReg));
+ LiftoffRegister rhs_reg = pinned.set(__ PopToRegister(kGpReg, pinned));
+ LiftoffRegister lhs_reg = __ PopToRegister(kGpReg, pinned);
+ Register args[] = {lhs_reg.gp(), rhs_reg.gp()};
+ GenerateCCall(dst_reg.gp(), arraysize(args), args, ext_ref);
+ __ PushRegister(kWasmI32, dst_reg);
+ }
+
+ void F32BinOp(void (LiftoffAssembler::*emit_fn)(DoubleRegister,
+ DoubleRegister,
+ DoubleRegister)) {
+ LiftoffRegList pinned;
+ LiftoffRegister target_reg =
+ pinned.set(__ GetBinaryOpTargetRegister(kFpReg));
+ LiftoffRegister rhs_reg = pinned.set(__ PopToRegister(kFpReg, pinned));
+ LiftoffRegister lhs_reg = __ PopToRegister(kFpReg, pinned);
+ (asm_->*emit_fn)(target_reg.fp(), lhs_reg.fp(), rhs_reg.fp());
+ __ PushRegister(kWasmF32, target_reg);
}
void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& lhs, const Value& rhs, Value* result) {
- void (LiftoffAssembler::*emit_fn)(Register, Register, Register);
-#define CASE_EMIT_FN(opcode, fn) \
- case WasmOpcode::kExpr##opcode: \
- emit_fn = &LiftoffAssembler::emit_##fn; \
+#define CASE_BINOP(opcode, type, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return type##BinOp(&LiftoffAssembler::emit_##fn);
+#define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \
+ case WasmOpcode::kExpr##opcode: \
+ type##CCallBinOp(ExternalReference::ext_ref_fn(asm_->isolate())); \
break;
switch (opcode) {
- CASE_EMIT_FN(I32Add, i32_add)
- CASE_EMIT_FN(I32Sub, i32_sub)
- CASE_EMIT_FN(I32Mul, i32_mul)
- CASE_EMIT_FN(I32And, i32_and)
- CASE_EMIT_FN(I32Ior, i32_or)
- CASE_EMIT_FN(I32Xor, i32_xor)
+ CASE_BINOP(I32Add, I32, i32_add)
+ CASE_BINOP(I32Sub, I32, i32_sub)
+ CASE_BINOP(I32Mul, I32, i32_mul)
+ CASE_BINOP(I32And, I32, i32_and)
+ CASE_BINOP(I32Ior, I32, i32_or)
+ CASE_BINOP(I32Xor, I32, i32_xor)
+ CASE_BINOP(I32Shl, I32, i32_shl)
+ CASE_BINOP(I32ShrS, I32, i32_sar)
+ CASE_BINOP(I32ShrU, I32, i32_shr)
+ CASE_CCALL_BINOP(I32Rol, I32, wasm_word32_rol)
+ CASE_CCALL_BINOP(I32Ror, I32, wasm_word32_ror)
+ CASE_BINOP(F32Add, F32, f32_add)
+ CASE_BINOP(F32Sub, F32, f32_sub)
+ CASE_BINOP(F32Mul, F32, f32_mul)
default:
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
}
-#undef CASE_EMIT_FN
-
- LiftoffAssembler::PinnedRegisterScope pinned_regs;
- Register target_reg = pinned_regs.pin(__ GetBinaryOpTargetRegister(kGpReg));
- Register rhs_reg = pinned_regs.pin(__ PopToRegister(kGpReg, pinned_regs));
- Register lhs_reg = __ PopToRegister(kGpReg, pinned_regs);
- (asm_->*emit_fn)(target_reg, lhs_reg, rhs_reg);
- __ PushRegister(kWasmI32, target_reg);
+#undef CASE_BINOP
+#undef CASE_CCALL_BINOP
}
void I32Const(Decoder* decoder, Value* result, int32_t value) {
@@ -274,9 +575,14 @@ class LiftoffCompiler {
void I64Const(Decoder* decoder, Value* result, int64_t value) {
unsupported(decoder, "i64.const");
}
+
void F32Const(Decoder* decoder, Value* result, float value) {
- unsupported(decoder, "f32.const");
+ LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
+ __ LoadConstant(reg, WasmValue(value));
+ __ PushRegister(kWasmF32, reg);
+ CheckStackSizeLimit(decoder);
}
+
void F64Const(Decoder* decoder, Value* result, double value) {
unsupported(decoder, "f64.const");
}
@@ -295,37 +601,58 @@ class LiftoffCompiler {
}
if (!values.is_empty()) {
if (values.size() > 1) return unsupported(decoder, "multi-return");
- // TODO(clemensh): Handle other types.
- if (values[0].type != kWasmI32)
- return unsupported(decoder, "non-i32 return");
- Register reg = __ PopToRegister(kGpReg);
+ RegClass rc = reg_class_for(values[0].type);
+ LiftoffRegister reg = __ PopToRegister(rc);
__ MoveToReturnRegister(reg);
}
__ LeaveFrame(StackFrame::WASM_COMPILED);
- __ Ret();
+ __ DropStackSlotsAndRet(
+ static_cast<uint32_t>(call_desc_->StackParameterCount()));
}
void GetLocal(Decoder* decoder, Value* result,
const LocalIndexOperand<validate>& operand) {
auto& slot = __ cache_state()->stack_state[operand.index];
+ DCHECK_EQ(slot.type(), operand.type);
switch (slot.loc()) {
case kRegister:
- __ PushRegister(operand.type, slot.reg());
+ __ PushRegister(slot.type(), slot.reg());
break;
- case kConstant:
+ case kI32Const:
__ cache_state()->stack_state.emplace_back(operand.type,
slot.i32_const());
break;
case kStack: {
auto rc = reg_class_for(operand.type);
- Register reg = __ GetUnusedRegister(rc);
+ LiftoffRegister reg = __ GetUnusedRegister(rc);
__ Fill(reg, operand.index);
- __ PushRegister(operand.type, reg);
- } break;
+ __ PushRegister(slot.type(), reg);
+ break;
+ }
}
CheckStackSizeLimit(decoder);
}
+ void SetLocalFromStackSlot(LiftoffAssembler::VarState& dst_slot,
+ uint32_t local_index) {
+ auto& state = *__ cache_state();
+ if (dst_slot.is_reg()) {
+ LiftoffRegister slot_reg = dst_slot.reg();
+ if (state.get_use_count(slot_reg) == 1) {
+ __ Fill(dst_slot.reg(), state.stack_height() - 1);
+ return;
+ }
+ state.dec_used(slot_reg);
+ }
+ ValueType type = dst_slot.type();
+ DCHECK_EQ(type, __ local_type(local_index));
+ RegClass rc = reg_class_for(type);
+ LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
+ __ Fill(dst_reg, __ cache_state()->stack_height() - 1);
+ dst_slot = LiftoffAssembler::VarState(type, dst_reg);
+ __ cache_state()->inc_used(dst_reg);
+ }
+
void SetLocal(uint32_t local_index, bool is_tee) {
auto& state = *__ cache_state();
auto& source_slot = state.stack_state.back();
@@ -336,31 +663,13 @@ class LiftoffCompiler {
target_slot = source_slot;
if (is_tee) state.inc_used(target_slot.reg());
break;
- case kConstant:
+ case kI32Const:
__ DropStackSlot(&target_slot);
target_slot = source_slot;
break;
- case kStack: {
- switch (target_slot.loc()) {
- case kRegister:
- if (state.register_use_count[target_slot.reg().code()] == 1) {
- __ Fill(target_slot.reg(), state.stack_height() - 1);
- break;
- } else {
- state.dec_used(target_slot.reg());
- // and fall through to use a new register.
- }
- case kConstant:
- case kStack: {
- ValueType type = __ local_type(local_index);
- Register target_reg = __ GetUnusedRegister(reg_class_for(type));
- __ Fill(target_reg, state.stack_height() - 1);
- target_slot = LiftoffAssembler::VarState(type, target_reg);
- state.inc_used(target_reg);
- } break;
- }
+ case kStack:
+ SetLocalFromStackSlot(target_slot, local_index);
break;
- }
}
if (!is_tee) __ cache_state()->stack_state.pop_back();
}
@@ -380,31 +689,34 @@ class LiftoffCompiler {
const auto* global = &env_->module->globals[operand.index];
if (global->type != kWasmI32 && global->type != kWasmI64)
return unsupported(decoder, "non-int global");
- LiftoffAssembler::PinnedRegisterScope pinned;
- Register addr = pinned.pin(__ GetUnusedRegister(kGpReg));
+ LiftoffRegList pinned;
+ Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp();
__ LoadFromContext(addr, offsetof(WasmContext, globals_start),
kPointerSize);
- Register value =
- pinned.pin(__ GetUnusedRegister(reg_class_for(global->type), pinned));
- int size = 1 << ElementSizeLog2Of(global->type);
- if (size > kPointerSize)
+ LiftoffRegister value =
+ pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned));
+ LoadType type =
+ global->type == kWasmI32 ? LoadType::kI32Load : LoadType::kI64Load;
+ if (type.size() > kPointerSize)
return unsupported(decoder, "global > kPointerSize");
- __ Load(value, addr, global->offset, size, pinned);
+ __ Load(value, addr, no_reg, global->offset, type, pinned);
__ PushRegister(global->type, value);
+ CheckStackSizeLimit(decoder);
}
void SetGlobal(Decoder* decoder, const Value& value,
const GlobalIndexOperand<validate>& operand) {
auto* global = &env_->module->globals[operand.index];
if (global->type != kWasmI32) return unsupported(decoder, "non-i32 global");
- LiftoffAssembler::PinnedRegisterScope pinned;
- Register addr = pinned.pin(__ GetUnusedRegister(kGpReg));
+ LiftoffRegList pinned;
+ Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp();
__ LoadFromContext(addr, offsetof(WasmContext, globals_start),
kPointerSize);
- Register reg =
- pinned.pin(__ PopToRegister(reg_class_for(global->type), pinned));
- int size = 1 << ElementSizeLog2Of(global->type);
- __ Store(addr, global->offset, reg, size, pinned);
+ LiftoffRegister reg =
+ pinned.set(__ PopToRegister(reg_class_for(global->type), pinned));
+ StoreType type =
+ global->type == kWasmI32 ? StoreType::kI32Store : StoreType::kI64Store;
+ __ Store(addr, no_reg, global->offset, reg, type, pinned);
}
void Unreachable(Decoder* decoder) { unsupported(decoder, "unreachable"); }
@@ -414,7 +726,7 @@ class LiftoffCompiler {
unsupported(decoder, "select");
}
- void Br(Decoder* decoder, Control* target) {
+ void Br(Control* target) {
if (!target->br_merge()->reached) {
target->label_state.InitMerge(*__ cache_state(), __ num_locals(),
target->br_merge()->arity);
@@ -423,12 +735,17 @@ class LiftoffCompiler {
__ jmp(target->label.get());
}
+ void Br(Decoder* decoder, Control* target) {
+ Br(target);
+ }
+
void BrIf(Decoder* decoder, const Value& cond, Control* target) {
Label cont_false;
- Register value = __ PopToRegister(kGpReg);
- __ JumpIfZero(value, &cont_false);
+ Register value = __ PopToRegister(kGpReg).gp();
+ __ emit_i32_test(value);
+ __ emit_cond_jump(kEqual, &cont_false);
- Br(decoder, target);
+ Br(target);
__ bind(&cont_false);
}
@@ -436,30 +753,221 @@ class LiftoffCompiler {
const Value& key) {
unsupported(decoder, "br_table");
}
+
void Else(Decoder* decoder, Control* if_block) {
- unsupported(decoder, "else");
+ if (if_block->reachable()) __ emit_jump(if_block->label.get());
+ __ bind(if_block->else_state->label.get());
+ __ cache_state()->Steal(if_block->else_state->state);
+ }
+
+ Label* AddOutOfLineTrap(wasm::WasmCodePosition position, uint32_t pc = 0) {
+ DCHECK(!FLAG_wasm_no_bounds_checks);
+ // The pc is needed if and only if trap handlers are enabled.
+ DCHECK_EQ(pc != 0, env_->use_trap_handler);
+
+ out_of_line_code_.push_back(OutOfLineCode::Trap(
+ Builtins::kThrowWasmTrapMemOutOfBounds, position, pc));
+ return out_of_line_code_.back().label.get();
+ }
+
+ void BoundsCheckMem(uint32_t access_size, uint32_t offset, Register index,
+ wasm::WasmCodePosition position, LiftoffRegList pinned) {
+ DCHECK(!env_->use_trap_handler);
+ if (FLAG_wasm_no_bounds_checks) return;
+
+ Label* trap_label = AddOutOfLineTrap(position);
+
+ if (access_size > max_size_ || offset > max_size_ - access_size) {
+ // The access will be out of bounds, even for the largest memory.
+ __ emit_jump(trap_label);
+ return;
+ }
+ uint32_t end_offset = offset + access_size - 1;
+
+ // If the end offset is larger than the smallest memory, dynamically check
+ // the end offset against the actual memory size, which is not known at
+ // compile time. Otherwise, only one check is required (see below).
+ LiftoffRegister end_offset_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg, pinned);
+ __ LoadFromContext(mem_size.gp(), offsetof(WasmContext, mem_size), 4);
+ __ LoadConstant(end_offset_reg, WasmValue(end_offset));
+ if (end_offset >= min_size_) {
+ __ emit_i32_compare(end_offset_reg.gp(), mem_size.gp());
+ __ emit_cond_jump(kUnsignedGreaterEqual, trap_label);
+ }
+
+ // Just reuse the end_offset register for computing the effective size.
+ LiftoffRegister effective_size_reg = end_offset_reg;
+ __ emit_i32_sub(effective_size_reg.gp(), mem_size.gp(),
+ end_offset_reg.gp());
+
+ __ emit_i32_compare(index, effective_size_reg.gp());
+ __ emit_cond_jump(kUnsignedGreaterEqual, trap_label);
+ }
+
+ void TraceMemoryOperation(bool is_store, MachineRepresentation rep,
+ Register index, uint32_t offset,
+ WasmCodePosition position) {
+ // Before making the runtime call, spill all cache registers.
+ __ SpillAllRegisters();
+
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
+ // Get one register for computing the address (offset + index).
+ LiftoffRegister address = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ // Compute offset+index in address.
+ __ LoadConstant(address, WasmValue(offset));
+ __ emit_i32_add(address.gp(), address.gp(), index);
+
+ // Get a register to hold the stack slot for wasm::MemoryTracingInfo.
+ LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ // Allocate stack slot for wasm::MemoryTracingInfo.
+ __ AllocateStackSlot(info.gp(), sizeof(wasm::MemoryTracingInfo));
+
+ // Now store all information into the wasm::MemoryTracingInfo struct.
+ __ Store(info.gp(), no_reg, offsetof(wasm::MemoryTracingInfo, address),
+ address, StoreType::kI32Store, pinned);
+ __ LoadConstant(address, WasmValue(is_store ? 1 : 0));
+ __ Store(info.gp(), no_reg, offsetof(wasm::MemoryTracingInfo, is_store),
+ address, StoreType::kI32Store8, pinned);
+ __ LoadConstant(address, WasmValue(static_cast<int>(rep)));
+ __ Store(info.gp(), no_reg, offsetof(wasm::MemoryTracingInfo, mem_rep),
+ address, StoreType::kI32Store8, pinned);
+
+ source_position_table_builder_->AddPosition(
+ __ pc_offset(), SourcePosition(position), false);
+
+ Register args[] = {info.gp()};
+ GenerateRuntimeCall(arraysize(args), args);
+ }
+
+ void GenerateRuntimeCall(int num_args, Register* args) {
+ compiler::CallDescriptor* desc =
+ compiler::Linkage::GetRuntimeCallDescriptor(
+ compilation_zone_, Runtime::kWasmTraceMemory, num_args,
+ compiler::Operator::kNoProperties,
+ compiler::CallDescriptor::kNoFlags);
+ // Currently, only one argument is supported. More arguments require some
+ // caution for the parallel register moves (reuse StackTransferRecipe).
+ DCHECK_EQ(1, num_args);
+ constexpr size_t kInputShift = 1; // Input 0 is the call target.
+ compiler::LinkageLocation param_loc = desc->GetInputLocation(kInputShift);
+ if (param_loc.IsRegister()) {
+ Register reg = Register::from_code(param_loc.AsRegister());
+ __ Move(LiftoffRegister(reg), LiftoffRegister(args[0]));
+ } else {
+ DCHECK(param_loc.IsCallerFrameSlot());
+ __ PushCallerFrameSlot(LiftoffRegister(args[0]));
+ }
+
+ // Allocate the codegen zone if not done before.
+ if (!*codegen_zone_) {
+ codegen_zone_->reset(
+ new Zone(__ isolate()->allocator(), "LiftoffCodegenZone"));
+ }
+ __ CallRuntime(codegen_zone_->get(), Runtime::kWasmTraceMemory);
+ __ DeallocateStackSlot(sizeof(wasm::MemoryTracingInfo));
}
- void LoadMem(Decoder* decoder, ValueType type, MachineType mem_type,
- const MemoryAccessOperand<validate>& operand, const Value& index,
- Value* result) {
- unsupported(decoder, "memory load");
+
+ void LoadMem(Decoder* decoder, LoadType type,
+ const MemoryAccessOperand<validate>& operand,
+ const Value& index_val, Value* result) {
+ ValueType value_type = type.value_type();
+ if (value_type != kWasmI32 && value_type != kWasmF32)
+ return unsupported(decoder, "unsupported load type");
+ LiftoffRegList pinned;
+ Register index = pinned.set(__ PopToRegister(kGpReg)).gp();
+ if (!env_->use_trap_handler) {
+ // Emit an explicit bounds check.
+ BoundsCheckMem(type.size(), operand.offset, index, decoder->position(),
+ pinned);
+ }
+ Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ __ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize);
+ RegClass rc = reg_class_for(value_type);
+ LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
+ uint32_t protected_load_pc = 0;
+ __ Load(value, addr, index, operand.offset, type, pinned,
+ &protected_load_pc);
+ if (env_->use_trap_handler) {
+ AddOutOfLineTrap(decoder->position(), protected_load_pc);
+ }
+ __ PushRegister(value_type, value);
+ CheckStackSizeLimit(decoder);
+
+ if (FLAG_wasm_trace_memory) {
+ TraceMemoryOperation(false, type.mem_type().representation(), index,
+ operand.offset, decoder->position());
+ }
}
- void StoreMem(Decoder* decoder, ValueType type, MachineType mem_type,
+
+ void StoreMem(Decoder* decoder, StoreType type,
const MemoryAccessOperand<validate>& operand,
- const Value& index, const Value& value) {
- unsupported(decoder, "memory store");
+ const Value& index_val, const Value& value_val) {
+ ValueType value_type = type.value_type();
+ if (value_type != kWasmI32 && value_type != kWasmF32)
+ return unsupported(decoder, "unsupported store type");
+ RegClass rc = reg_class_for(value_type);
+ LiftoffRegList pinned;
+ LiftoffRegister value = pinned.set(__ PopToRegister(rc));
+ Register index = pinned.set(__ PopToRegister(kGpReg, pinned)).gp();
+ if (!env_->use_trap_handler) {
+ // Emit an explicit bounds check.
+ BoundsCheckMem(type.size(), operand.offset, index, decoder->position(),
+ pinned);
+ }
+ Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ __ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize);
+ uint32_t protected_store_pc = 0;
+ __ Store(addr, index, operand.offset, value, type, pinned,
+ &protected_store_pc);
+ if (env_->use_trap_handler) {
+ AddOutOfLineTrap(decoder->position(), protected_store_pc);
+ }
+ if (FLAG_wasm_trace_memory) {
+ TraceMemoryOperation(true, type.mem_rep(), index, operand.offset,
+ decoder->position());
+ }
}
+
void CurrentMemoryPages(Decoder* decoder, Value* result) {
unsupported(decoder, "current_memory");
}
void GrowMemory(Decoder* decoder, const Value& value, Value* result) {
unsupported(decoder, "grow_memory");
}
+
void CallDirect(Decoder* decoder,
const CallFunctionOperand<validate>& operand,
const Value args[], Value returns[]) {
- unsupported(decoder, "call");
+ if (operand.sig->return_count() > 1)
+ return unsupported(decoder, "multi-return");
+
+ compiler::CallDescriptor* call_desc =
+ compiler::GetWasmCallDescriptor(compilation_zone_, operand.sig);
+
+ __ PrepareCall(operand.sig, call_desc);
+
+ source_position_table_builder_->AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), false);
+
+ if (FLAG_wasm_jit_to_native) {
+ // Just encode the function index. This will be patched at instantiation.
+ Address addr = reinterpret_cast<Address>(operand.index);
+ __ CallNativeWasmCode(addr);
+ } else {
+ Handle<Code> target = operand.index < env_->function_code.size()
+ ? env_->function_code[operand.index]
+ : env_->default_function_code;
+ __ Call(target, RelocInfo::CODE_TARGET);
+ }
+
+ safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+
+ __ FinishCall(operand.sig, call_desc);
}
+
void CallIndirect(Decoder* decoder, const Value& index,
const CallIndirectOperand<validate>& operand,
const Value args[], Value returns[]) {
@@ -500,10 +1008,46 @@ class LiftoffCompiler {
}
private:
- LiftoffAssembler* asm_;
- compiler::CallDescriptor* call_desc_;
- compiler::ModuleEnv* env_;
+ LiftoffAssembler* const asm_;
+ compiler::CallDescriptor* const call_desc_;
+ compiler::ModuleEnv* const env_;
+ // {min_size_} and {max_size_} are cached values computed from the ModuleEnv.
+ const uint32_t min_size_;
+ const uint32_t max_size_;
+ const compiler::RuntimeExceptionSupport runtime_exception_support_;
bool ok_ = true;
+ std::vector<OutOfLineCode> out_of_line_code_;
+ SourcePositionTableBuilder* const source_position_table_builder_;
+ std::vector<trap_handler::ProtectedInstructionData>* protected_instructions_;
+ // Zone used to store information during compilation. The result will be
+ // stored independently, such that this zone can die together with the
+ // LiftoffCompiler after compilation.
+ Zone* compilation_zone_;
+ // This zone is allocated when needed, held externally, and survives until
+ // code generation (in FinishCompilation).
+ std::unique_ptr<Zone>* codegen_zone_;
+ SafepointTableBuilder safepoint_table_builder_;
+
+ void TraceCacheState(Decoder* decoder) const {
+#ifdef DEBUG
+ if (!FLAG_trace_liftoff || !FLAG_trace_wasm_decoder) return;
+ OFStream os(stdout);
+ for (int control_depth = decoder->control_depth() - 1; control_depth >= -1;
+ --control_depth) {
+ LiftoffAssembler::CacheState* cache_state =
+ control_depth == -1
+ ? asm_->cache_state()
+ : &decoder->control_at(control_depth)->label_state;
+ bool first = true;
+ for (LiftoffAssembler::VarState& slot : cache_state->stack_state) {
+ os << (first ? "" : "-") << slot;
+ first = false;
+ }
+ if (control_depth != -1) PrintF("; ");
+ }
+ os << "\n";
+#endif
+ }
};
} // namespace
@@ -518,9 +1062,15 @@ bool compiler::WasmCompilationUnit::ExecuteLiftoffCompilation() {
Zone zone(isolate_->allocator(), "LiftoffCompilationZone");
const wasm::WasmModule* module = env_ ? env_->module : nullptr;
auto* call_desc = compiler::GetWasmCallDescriptor(&zone, func_body_.sig);
+ base::Optional<TimedHistogramScope> liftoff_compile_time_scope(
+ base::in_place, counters()->liftoff_compile_time());
wasm::WasmFullDecoder<wasm::Decoder::kValidate, wasm::LiftoffCompiler>
- decoder(&zone, module, func_body_, &liftoff_.asm_, call_desc, env_);
+ decoder(&zone, module, func_body_, &liftoff_.asm_, call_desc, env_,
+ runtime_exception_support_,
+ &liftoff_.source_position_table_builder_,
+ protected_instructions_.get(), &zone, &liftoff_.codegen_zone_);
decoder.Decode();
+ liftoff_compile_time_scope.reset();
if (!decoder.interface().ok()) {
// Liftoff compilation failed.
isolate_->counters()->liftoff_unsupported_functions()->Increment();
@@ -539,6 +1089,8 @@ bool compiler::WasmCompilationUnit::ExecuteLiftoffCompilation() {
// Record the memory cost this unit places on the system until
// it is finalized.
memory_cost_ = liftoff_.asm_.pc_offset();
+ liftoff_.safepoint_table_offset_ =
+ decoder.interface().GetSafepointTableOffset();
isolate_->counters()->liftoff_compiled_functions()->Increment();
return true;
}
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
new file mode 100644
index 0000000000..bb5ef5be4a
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -0,0 +1,242 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LIFTOFF_REGISTER_H_
+#define V8_WASM_BASELINE_LIFTOFF_REGISTER_H_
+
+#include <iosfwd>
+#include <memory>
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
+#include "src/base/bits.h"
+#include "src/wasm/baseline/liftoff-assembler-defs.h"
+#include "src/wasm/wasm-opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+enum RegClass { kNoReg, kGpReg, kFpReg };
+
+// TODO(clemensh): Use a switch once we require C++14 support.
+static inline constexpr RegClass reg_class_for(ValueType type) {
+ return type == kWasmI32 || type == kWasmI64 // int types
+ ? kGpReg
+ : type == kWasmF32 || type == kWasmF64 // float types
+ ? kFpReg
+ : kNoReg; // other (unsupported) types
+}
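+
+// Illustrative compile-time checks (not part of the original change); they use
+// only the ValueType constants already referenced elsewhere in this file.
+static_assert(reg_class_for(kWasmI32) == kGpReg, "i32 values use gp registers");
+static_assert(reg_class_for(kWasmF32) == kFpReg, "f32 values use fp registers");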
+
+// RegForClass<rc>: Register for rc==kGpReg, DoubleRegister for rc==kFpReg, void
+// for all other values of rc.
+template <RegClass rc>
+using RegForClass = typename std::conditional<
+ rc == kGpReg, Register,
+ typename std::conditional<rc == kFpReg, DoubleRegister, void>::type>::type;
+
+// Maximum code of a gp cache register.
+static constexpr int kMaxGpRegCode =
+ 8 * sizeof(kLiftoffAssemblerGpCacheRegs) -
+ base::bits::CountLeadingZeros(kLiftoffAssemblerGpCacheRegs);
+// Maximum code of an fp cache register.
+static constexpr int kMaxFpRegCode =
+ 8 * sizeof(kLiftoffAssemblerFpCacheRegs) -
+ base::bits::CountLeadingZeros(kLiftoffAssemblerFpCacheRegs);
+// LiftoffRegister encodes both gp and fp in a unified index space.
+// [0 .. kMaxGpRegCode] encodes gp registers,
+// [kMaxGpRegCode+1 .. kMaxGpRegCode + kMaxFpRegCode] encodes fp registers.
+static constexpr int kAfterMaxLiftoffGpRegCode = kMaxGpRegCode + 1;
+static constexpr int kAfterMaxLiftoffFpRegCode =
+ kAfterMaxLiftoffGpRegCode + kMaxFpRegCode + 1;
+static constexpr int kAfterMaxLiftoffRegCode = kAfterMaxLiftoffFpRegCode;
+static_assert(kAfterMaxLiftoffRegCode < 256,
+ "liftoff register codes can be stored in one uint8_t");
+
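+// Example of the unified code space used by LiftoffRegister (illustrative, not
+// part of the patch): a gp register with code 3 gets liftoff code 3, while an
+// fp register with code 3 gets liftoff code kAfterMaxLiftoffGpRegCode + 3.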
+class LiftoffRegister {
+ public:
+ explicit LiftoffRegister(Register reg) : LiftoffRegister(reg.code()) {
+ DCHECK_EQ(reg, gp());
+ }
+ explicit LiftoffRegister(DoubleRegister reg)
+ : LiftoffRegister(kAfterMaxLiftoffGpRegCode + reg.code()) {
+ DCHECK_EQ(reg, fp());
+ }
+
+ static LiftoffRegister from_liftoff_code(int code) {
+ DCHECK_LE(0, code);
+ DCHECK_GT(kAfterMaxLiftoffRegCode, code);
+ return LiftoffRegister(code);
+ }
+
+ static LiftoffRegister from_code(RegClass rc, int code) {
+ switch (rc) {
+ case kGpReg:
+ return LiftoffRegister(Register::from_code(code));
+ case kFpReg:
+ return LiftoffRegister(DoubleRegister::from_code(code));
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ constexpr bool is_gp() const { return code_ < kAfterMaxLiftoffGpRegCode; }
+ constexpr bool is_fp() const {
+ return code_ >= kAfterMaxLiftoffGpRegCode &&
+ code_ < kAfterMaxLiftoffFpRegCode;
+ }
+
+ Register gp() const {
+ DCHECK(is_gp());
+ return Register::from_code(code_);
+ }
+
+ DoubleRegister fp() const {
+ DCHECK(is_fp());
+ return DoubleRegister::from_code(code_ - kAfterMaxLiftoffGpRegCode);
+ }
+
+ int liftoff_code() const { return code_; }
+
+ RegClass reg_class() const {
+ DCHECK(is_gp() || is_fp());
+ return is_gp() ? kGpReg : kFpReg;
+ }
+
+ bool operator==(const LiftoffRegister other) const {
+ return code_ == other.code_;
+ }
+ bool operator!=(const LiftoffRegister other) const {
+ return code_ != other.code_;
+ }
+
+ private:
+ uint8_t code_;
+
+ explicit constexpr LiftoffRegister(uint8_t code) : code_(code) {}
+};
+static_assert(IS_TRIVIALLY_COPYABLE(LiftoffRegister),
+ "LiftoffRegister can efficiently be passed by value");
+
+inline std::ostream& operator<<(std::ostream& os, LiftoffRegister reg) {
+ return reg.is_gp() ? os << "gp" << reg.gp().code()
+ : os << "fp" << reg.fp().code();
+}
+
+class LiftoffRegList {
+ public:
+ static constexpr bool use_u16 = kAfterMaxLiftoffRegCode <= 16;
+ static constexpr bool use_u32 = !use_u16 && kAfterMaxLiftoffRegCode <= 32;
+ using storage_t = std::conditional<
+ use_u16, uint16_t,
+ std::conditional<use_u32, uint32_t, uint64_t>::type>::type;
+
+ static constexpr storage_t kGpMask = storage_t{kLiftoffAssemblerGpCacheRegs};
+ static constexpr storage_t kFpMask = storage_t{kLiftoffAssemblerFpCacheRegs}
+ << kAfterMaxLiftoffGpRegCode;
+
+ constexpr LiftoffRegList() = default;
+
+ Register set(Register reg) { return set(LiftoffRegister(reg)).gp(); }
+ DoubleRegister set(DoubleRegister reg) {
+ return set(LiftoffRegister(reg)).fp();
+ }
+
+ LiftoffRegister set(LiftoffRegister reg) {
+ regs_ |= storage_t{1} << reg.liftoff_code();
+ return reg;
+ }
+
+ LiftoffRegister clear(LiftoffRegister reg) {
+ regs_ &= ~(storage_t{1} << reg.liftoff_code());
+ return reg;
+ }
+
+ bool has(LiftoffRegister reg) const {
+ return (regs_ & (storage_t{1} << reg.liftoff_code())) != 0;
+ }
+
+ constexpr bool is_empty() const { return regs_ == 0; }
+
+ constexpr unsigned GetNumRegsSet() const {
+ return base::bits::CountPopulation(regs_);
+ }
+
+ constexpr LiftoffRegList operator&(const LiftoffRegList other) const {
+ return LiftoffRegList(regs_ & other.regs_);
+ }
+
+ constexpr LiftoffRegList operator~() const {
+ return LiftoffRegList(~regs_ & (kGpMask | kFpMask));
+ }
+
+ constexpr bool operator==(const LiftoffRegList other) const {
+ return regs_ == other.regs_;
+ }
+ constexpr bool operator!=(const LiftoffRegList other) const {
+ return regs_ != other.regs_;
+ }
+
+ LiftoffRegister GetFirstRegSet() const {
+ DCHECK(!is_empty());
+ unsigned first_code = base::bits::CountTrailingZeros(regs_);
+ return LiftoffRegister::from_liftoff_code(first_code);
+ }
+
+ LiftoffRegister GetLastRegSet() const {
+ DCHECK(!is_empty());
+ unsigned last_code =
+ 8 * sizeof(regs_) - 1 - base::bits::CountLeadingZeros(regs_);
+ return LiftoffRegister::from_liftoff_code(last_code);
+ }
+
+ LiftoffRegList MaskOut(const LiftoffRegList mask) const {
+ // Masking out is guaranteed to return a correct reg list, hence no checks
+ // needed.
+ return FromBits(regs_ & ~mask.regs_);
+ }
+
+ static LiftoffRegList FromBits(storage_t bits) {
+ DCHECK_EQ(bits, bits & (kGpMask | kFpMask));
+ return LiftoffRegList(bits);
+ }
+
+ template <storage_t bits>
+ static constexpr LiftoffRegList FromBits() {
+ static_assert(bits == (bits & (kGpMask | kFpMask)), "illegal reg list");
+ return LiftoffRegList(bits);
+ }
+
+ template <typename... Regs>
+ static LiftoffRegList ForRegs(Regs... regs) {
+ std::array<LiftoffRegister, sizeof...(regs)> regs_arr{
+ LiftoffRegister(regs)...};
+ LiftoffRegList list;
+ for (LiftoffRegister reg : regs_arr) list.set(reg);
+ return list;
+ }
+
+ private:
+ storage_t regs_ = 0;
+
+ // Unchecked constructor. Only use for valid bits.
+ explicit constexpr LiftoffRegList(storage_t bits) : regs_(bits) {}
+};
+static_assert(IS_TRIVIALLY_COPYABLE(LiftoffRegList),
+ "LiftoffRegList can be passed by value");
+
+static constexpr LiftoffRegList kGpCacheRegList =
+ LiftoffRegList::FromBits<LiftoffRegList::kGpMask>();
+static constexpr LiftoffRegList kFpCacheRegList =
+ LiftoffRegList::FromBits<LiftoffRegList::kFpMask>();
+
+static constexpr LiftoffRegList GetCacheRegList(RegClass rc) {
+ return rc == kGpReg ? kGpCacheRegList : kFpCacheRegList;
+}
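+
+// Usage sketch (illustrative, not part of the patch): pick an unused gp cache
+// register while keeping already-used registers pinned.
+//   LiftoffRegList pinned = LiftoffRegList::ForRegs(used_reg);  // hypothetical
+//   LiftoffRegister next =
+//       GetCacheRegList(kGpReg).MaskOut(pinned).GetFirstRegSet();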
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LIFTOFF_REGISTER_H_
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips-defs.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips-defs.h
deleted file mode 100644
index edc52d74b6..0000000000
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips-defs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_DEFS_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_DEFS_H_
-
-#include "src/reglist.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// TODO(clemensh): Implement the LiftoffAssembler on this platform.
-static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
-
-static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index bc3ec1667e..50ab1e82c8 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -11,52 +11,168 @@ namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
+void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
-void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
+ UNIMPLEMENTED();
+}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {}
-
-void LiftoffAssembler::SpillContext(Register context) {}
-
-void LiftoffAssembler::Load(Register dst, Register src_addr,
- uint32_t offset_imm, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
- Register src, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
- uint32_t caller_slot_idx) {}
-
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
-
-void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
-
-void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
-
-#define DEFAULT_I32_BINOP(name, internal_name) \
- void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
- Register rhs) {}
-
-// clang-format off
-DEFAULT_I32_BINOP(add, add)
-DEFAULT_I32_BINOP(sub, sub)
-DEFAULT_I32_BINOP(mul, imul)
-DEFAULT_I32_BINOP(and, and)
-DEFAULT_I32_BINOP(or, or)
-DEFAULT_I32_BINOP(xor, xor)
-// clang-format on
-
-#undef DEFAULT_I32_BINOP
-
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+ int size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+ UNIMPLEMENTED();
+}
+
+#define UNIMPLEMENTED_GP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_GP_UNOP(name) \
+ bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_FP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ UNIMPLEMENTED(); \
+ }
+
+UNIMPLEMENTED_GP_BINOP(i32_add)
+UNIMPLEMENTED_GP_BINOP(i32_sub)
+UNIMPLEMENTED_GP_BINOP(i32_mul)
+UNIMPLEMENTED_GP_BINOP(i32_and)
+UNIMPLEMENTED_GP_BINOP(i32_or)
+UNIMPLEMENTED_GP_BINOP(i32_xor)
+UNIMPLEMENTED_GP_BINOP(i32_shl)
+UNIMPLEMENTED_GP_BINOP(i32_sar)
+UNIMPLEMENTED_GP_BINOP(i32_shr)
+UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_GP_UNOP(i32_clz)
+UNIMPLEMENTED_GP_UNOP(i32_ctz)
+UNIMPLEMENTED_GP_UNOP(i32_popcnt)
+UNIMPLEMENTED_GP_BINOP(ptrsize_add)
+UNIMPLEMENTED_FP_BINOP(f32_add)
+UNIMPLEMENTED_FP_BINOP(f32_sub)
+UNIMPLEMENTED_FP_BINOP(f32_mul)
+
+#undef UNIMPLEMENTED_GP_BINOP
+#undef UNIMPLEMENTED_GP_UNOP
+#undef UNIMPLEMENTED_FP_BINOP
+
+void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
+ uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
+ uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h
deleted file mode 100644
index 1652562515..0000000000
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_DEFS_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_DEFS_H_
-
-#include "src/reglist.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// TODO(clemensh): Implement the LiftoffAssembler on this platform.
-static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
-
-static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 2a10d0712e..fd63198e24 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -11,52 +11,168 @@ namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
+void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
-void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
+ UNIMPLEMENTED();
+}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {}
-
-void LiftoffAssembler::SpillContext(Register context) {}
-
-void LiftoffAssembler::Load(Register dst, Register src_addr,
- uint32_t offset_imm, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
- Register src, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
- uint32_t caller_slot_idx) {}
-
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
-
-void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
-
-void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
-
-#define DEFAULT_I32_BINOP(name, internal_name) \
- void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
- Register rhs) {}
-
-// clang-format off
-DEFAULT_I32_BINOP(add, add)
-DEFAULT_I32_BINOP(sub, sub)
-DEFAULT_I32_BINOP(mul, imul)
-DEFAULT_I32_BINOP(and, and)
-DEFAULT_I32_BINOP(or, or)
-DEFAULT_I32_BINOP(xor, xor)
-// clang-format on
-
-#undef DEFAULT_I32_BINOP
-
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+ int size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+ UNIMPLEMENTED();
+}
+
+#define UNIMPLEMENTED_GP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_GP_UNOP(name) \
+ bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_FP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ UNIMPLEMENTED(); \
+ }
+
+UNIMPLEMENTED_GP_BINOP(i32_add)
+UNIMPLEMENTED_GP_BINOP(i32_sub)
+UNIMPLEMENTED_GP_BINOP(i32_mul)
+UNIMPLEMENTED_GP_BINOP(i32_and)
+UNIMPLEMENTED_GP_BINOP(i32_or)
+UNIMPLEMENTED_GP_BINOP(i32_xor)
+UNIMPLEMENTED_GP_BINOP(i32_shl)
+UNIMPLEMENTED_GP_BINOP(i32_sar)
+UNIMPLEMENTED_GP_BINOP(i32_shr)
+UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_GP_UNOP(i32_clz)
+UNIMPLEMENTED_GP_UNOP(i32_ctz)
+UNIMPLEMENTED_GP_UNOP(i32_popcnt)
+UNIMPLEMENTED_GP_BINOP(ptrsize_add)
+UNIMPLEMENTED_FP_BINOP(f32_add)
+UNIMPLEMENTED_FP_BINOP(f32_sub)
+UNIMPLEMENTED_FP_BINOP(f32_mul)
+
+#undef UNIMPLEMENTED_GP_BINOP
+#undef UNIMPLEMENTED_GP_UNOP
+#undef UNIMPLEMENTED_FP_BINOP
+
+void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
+ uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
+ uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h
deleted file mode 100644
index b0d1317166..0000000000
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_DEFS_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_DEFS_H_
-
-#include "src/reglist.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// TODO(clemensh): Implement the LiftoffAssembler on this platform.
-static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
-
-static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 55a1475efe..2d62d88dec 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -11,52 +11,168 @@ namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
+void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
-void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
+ UNIMPLEMENTED();
+}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {}
-
-void LiftoffAssembler::SpillContext(Register context) {}
-
-void LiftoffAssembler::Load(Register dst, Register src_addr,
- uint32_t offset_imm, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
- Register src, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
- uint32_t caller_slot_idx) {}
-
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
-
-void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
-
-void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
-
-#define DEFAULT_I32_BINOP(name, internal_name) \
- void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
- Register rhs) {}
-
-// clang-format off
-DEFAULT_I32_BINOP(add, add)
-DEFAULT_I32_BINOP(sub, sub)
-DEFAULT_I32_BINOP(mul, imul)
-DEFAULT_I32_BINOP(and, and)
-DEFAULT_I32_BINOP(or, or)
-DEFAULT_I32_BINOP(xor, xor)
-// clang-format on
-
-#undef DEFAULT_I32_BINOP
-
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+ int size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+ UNIMPLEMENTED();
+}
+
+#define UNIMPLEMENTED_GP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_GP_UNOP(name) \
+ bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_FP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ UNIMPLEMENTED(); \
+ }
+
+UNIMPLEMENTED_GP_BINOP(i32_add)
+UNIMPLEMENTED_GP_BINOP(i32_sub)
+UNIMPLEMENTED_GP_BINOP(i32_mul)
+UNIMPLEMENTED_GP_BINOP(i32_and)
+UNIMPLEMENTED_GP_BINOP(i32_or)
+UNIMPLEMENTED_GP_BINOP(i32_xor)
+UNIMPLEMENTED_GP_BINOP(i32_shl)
+UNIMPLEMENTED_GP_BINOP(i32_sar)
+UNIMPLEMENTED_GP_BINOP(i32_shr)
+UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_GP_UNOP(i32_clz)
+UNIMPLEMENTED_GP_UNOP(i32_ctz)
+UNIMPLEMENTED_GP_UNOP(i32_popcnt)
+UNIMPLEMENTED_GP_BINOP(ptrsize_add)
+UNIMPLEMENTED_FP_BINOP(f32_add)
+UNIMPLEMENTED_FP_BINOP(f32_sub)
+UNIMPLEMENTED_FP_BINOP(f32_mul)
+
+#undef UNIMPLEMENTED_GP_BINOP
+#undef UNIMPLEMENTED_GP_UNOP
+#undef UNIMPLEMENTED_FP_BINOP
+
+void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
+ uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
+ uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390-defs.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390-defs.h
deleted file mode 100644
index e60dfb923b..0000000000
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390-defs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_DEFS_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_DEFS_H_
-
-#include "src/reglist.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// TODO(clemensh): Implement the LiftoffAssembler on this platform.
-static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
-
-static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 1c56971a20..eebb8e4720 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -11,52 +11,168 @@ namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
+void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
-void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
+ UNIMPLEMENTED();
+}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {}
-
-void LiftoffAssembler::SpillContext(Register context) {}
-
-void LiftoffAssembler::Load(Register dst, Register src_addr,
- uint32_t offset_imm, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
- Register src, int size,
- PinnedRegisterScope pinned) {}
-
-void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
- uint32_t caller_slot_idx) {}
-
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
-
-void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
-
-void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
-
-void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
-
-#define DEFAULT_I32_BINOP(name, internal_name) \
- void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
- Register rhs) {}
-
-// clang-format off
-DEFAULT_I32_BINOP(add, add)
-DEFAULT_I32_BINOP(sub, sub)
-DEFAULT_I32_BINOP(mul, imul)
-DEFAULT_I32_BINOP(and, and)
-DEFAULT_I32_BINOP(or, or)
-DEFAULT_I32_BINOP(xor, xor)
-// clang-format on
-
-#undef DEFAULT_I32_BINOP
-
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+ int size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+ UNIMPLEMENTED();
+}
+
+#define UNIMPLEMENTED_GP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_GP_UNOP(name) \
+ bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
+ UNIMPLEMENTED(); \
+ }
+#define UNIMPLEMENTED_FP_BINOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ UNIMPLEMENTED(); \
+ }
+
+UNIMPLEMENTED_GP_BINOP(i32_add)
+UNIMPLEMENTED_GP_BINOP(i32_sub)
+UNIMPLEMENTED_GP_BINOP(i32_mul)
+UNIMPLEMENTED_GP_BINOP(i32_and)
+UNIMPLEMENTED_GP_BINOP(i32_or)
+UNIMPLEMENTED_GP_BINOP(i32_xor)
+UNIMPLEMENTED_GP_BINOP(i32_shl)
+UNIMPLEMENTED_GP_BINOP(i32_sar)
+UNIMPLEMENTED_GP_BINOP(i32_shr)
+UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_GP_UNOP(i32_clz)
+UNIMPLEMENTED_GP_UNOP(i32_ctz)
+UNIMPLEMENTED_GP_UNOP(i32_popcnt)
+UNIMPLEMENTED_GP_BINOP(ptrsize_add)
+UNIMPLEMENTED_FP_BINOP(f32_add)
+UNIMPLEMENTED_FP_BINOP(f32_sub)
+UNIMPLEMENTED_FP_BINOP(f32_mul)
+
+#undef UNIMPLEMENTED_GP_BINOP
+#undef UNIMPLEMENTED_GP_UNOP
+#undef UNIMPLEMENTED_FP_BINOP
+
+void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
+ uint32_t src_index) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
+ uint32_t param_idx,
+ uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+
+void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64-defs.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64-defs.h
deleted file mode 100644
index ce568eab97..0000000000
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64-defs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_DEFS_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_DEFS_H_
-
-#include "src/reglist.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
-
-static constexpr RegList kLiftoffAssemblerGpCacheRegs =
- Register::ListOf<rax, rcx, rdx, rbx, rsi, rdi>();
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_DEFS_H_
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 559965ab96..2b3b750fc4 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -19,32 +19,39 @@ namespace liftoff {
inline Operand GetStackSlot(uint32_t index) {
// rbp-8 holds the stack marker, rbp-16 is the wasm context, first stack slot
// is located at rbp-24.
- constexpr int32_t kStackSlotSize = 8;
constexpr int32_t kFirstStackSlotOffset = -24;
- return Operand(rbp, kFirstStackSlotOffset - index * kStackSlotSize);
+ return Operand(
+ rbp, kFirstStackSlotOffset - index * LiftoffAssembler::kStackSlotSize);
}
// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
inline Operand GetContextOperand() { return Operand(rbp, -16); }
+// Use this register to store the address of the last argument pushed on the
+// stack for a call to C.
+static constexpr Register kCCallLastArgAddrReg = rax;
+
} // namespace liftoff
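The liftoff::GetStackSlot change above folds the slot size into LiftoffAssembler::kStackSlotSize but keeps the frame layout described in the comment: rbp-8 holds the stack marker, rbp-16 the wasm context, and slot i lives at rbp-24-8*i. A standalone sketch of that displacement computation (the helper name and the 8-byte slot size are assumptions taken from this diff, not V8 API):

#include <cstdint>

// Hypothetical helper mirroring liftoff::GetStackSlot's displacement math.
constexpr int32_t kStackSlotSize = 8;           // x64 slots are 8 bytes wide
constexpr int32_t kFirstStackSlotOffset = -24;  // rbp-8: marker, rbp-16: context

constexpr int32_t StackSlotDisplacement(uint32_t index) {
  return kFirstStackSlotOffset - static_cast<int32_t>(index) * kStackSlotSize;
}

static_assert(StackSlotDisplacement(0) == -24, "first slot sits at rbp-24");
static_assert(StackSlotDisplacement(2) == -40, "slots grow towards lower addresses");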
-void LiftoffAssembler::ReserveStackSpace(uint32_t space) {
- stack_space_ = space;
- subl(rsp, Immediate(space));
+void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) {
+ DCHECK_LE(bytes, kMaxInt);
+ subp(rsp, Immediate(bytes));
}
-void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
switch (value.type()) {
case kWasmI32:
if (value.to_i32() == 0) {
- xorl(reg, reg);
+ xorl(reg.gp(), reg.gp());
} else {
- movl(reg, Immediate(value.to_i32()));
+ movl(reg.gp(), Immediate(value.to_i32()));
}
break;
+ case kWasmF32:
+ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ break;
default:
- UNIMPLEMENTED();
+ UNREACHABLE();
}
}
@@ -64,54 +71,109 @@ void LiftoffAssembler::SpillContext(Register context) {
movp(liftoff::GetContextOperand(), context);
}
-void LiftoffAssembler::Load(Register dst, Register src_addr,
- uint32_t offset_imm, int size,
- PinnedRegisterScope pinned) {
- Operand src_op = Operand(src_addr, offset_imm);
+void LiftoffAssembler::FillContextInto(Register dst) {
+ movp(dst, liftoff::GetContextOperand());
+}
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc) {
+ Operand src_op = offset_reg == no_reg
+ ? Operand(src_addr, offset_imm)
+ : Operand(src_addr, offset_reg, times_1, offset_imm);
if (offset_imm > kMaxInt) {
// The immediate can not be encoded in the operand. Load it to a register
// first.
- Register src = GetUnusedRegister(kGpReg, pinned);
+ Register src = GetUnusedRegister(kGpReg, pinned).gp();
movl(src, Immediate(offset_imm));
+ if (offset_reg != no_reg) {
+ emit_ptrsize_add(src, src, offset_reg);
+ }
src_op = Operand(src_addr, src, times_1, 0);
}
- DCHECK(size == 4 || size == 8);
- if (size == 4) {
- movl(dst, src_op);
- } else {
- movq(dst, src_op);
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ movzxbl(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load8S:
+ movsxbl(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16U:
+ movzxwl(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16S:
+ movsxwl(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load:
+ movl(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load:
+ movq(dst.gp(), src_op);
+ break;
+ case LoadType::kF32Load:
+ Movss(dst.fp(), src_op);
+ break;
+ default:
+ UNREACHABLE();
}
}
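The offset_imm > kMaxInt branch above exists because an x64 addressing mode encodes its displacement as a signed 32-bit value, while wasm memory offsets are unsigned 32-bit; any offset above INT32_MAX therefore has to be materialized into a scratch register and folded into the index. A minimal sketch of that decision (standalone C++, names are illustrative only):

#include <cstdint>
#include <limits>

// True if an unsigned wasm offset can be used directly as the signed 32-bit
// displacement of an x64 memory operand.
constexpr bool FitsInDisplacement(uint32_t offset_imm) {
  return offset_imm <=
         static_cast<uint32_t>(std::numeric_limits<int32_t>::max());
}

static_assert(FitsInDisplacement(0x7fffffffu), "kMaxInt itself still fits");
static_assert(!FitsInDisplacement(0x80000000u), "anything larger needs a register");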
-void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
- Register src, int size,
- PinnedRegisterScope pinned) {
- Operand dst_op = Operand(dst_addr, offset_imm);
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc) {
+ Operand dst_op = offset_reg == no_reg
+ ? Operand(dst_addr, offset_imm)
+ : Operand(dst_addr, offset_reg, times_1, offset_imm);
if (offset_imm > kMaxInt) {
// The immediate can not be encoded in the operand. Load it to a register
// first.
- Register dst = GetUnusedRegister(kGpReg, pinned);
+ Register dst = GetUnusedRegister(kGpReg, pinned).gp();
movl(dst, Immediate(offset_imm));
+ if (offset_reg != no_reg) {
+ emit_ptrsize_add(dst, dst, offset_reg);
+ }
dst_op = Operand(dst_addr, dst, times_1, 0);
}
- DCHECK(size == 4 || size == 8);
- if (size == 4) {
- movl(dst_op, src);
- } else {
- movp(dst_op, src);
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ movb(dst_op, src.gp());
+ break;
+ case StoreType::kI32Store16:
+ movw(dst_op, src.gp());
+ break;
+ case StoreType::kI32Store:
+ movl(dst_op, src.gp());
+ break;
+ case StoreType::kI64Store:
+ movq(dst_op, src.gp());
+ break;
+ case StoreType::kF32Store:
+ Movss(dst_op, src.fp());
+ break;
+ default:
+ UNREACHABLE();
}
}
-void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {
- constexpr int32_t kStackSlotSize = 8;
- movl(dst, Operand(rbp, kStackSlotSize * (caller_slot_idx + 1)));
+ Operand src(rbp, kPointerSize * (caller_slot_idx + 1));
+ // TODO(clemensh): Handle different sizes here.
+ if (dst.is_gp()) {
+ movq(dst.gp(), src);
+ } else {
+ Movsd(dst.fp(), src);
+ }
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
DCHECK_NE(dst_index, src_index);
- if (cache_state_.has_unused_register()) {
- Register reg = GetUnusedRegister(kGpReg);
+ if (cache_state_.has_unused_register(kGpReg)) {
+ LiftoffRegister reg = GetUnusedRegister(kGpReg);
Fill(reg, src_index);
Spill(dst_index, reg);
} else {
@@ -120,24 +182,60 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
}
}
-void LiftoffAssembler::MoveToReturnRegister(Register reg) {
- // TODO(clemensh): Handle different types here.
- if (reg != rax) movl(rax, reg);
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+ // TODO(wasm): Extract the destination register from the CallDescriptor.
+ // TODO(wasm): Add multi-return support.
+ LiftoffRegister dst =
+ reg.is_gp() ? LiftoffRegister(rax) : LiftoffRegister(xmm1);
+ if (reg != dst) Move(dst, reg);
+}
+
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
+ // The caller should check that the registers are not equal. For most
+ // occurrences, this is already guaranteed, so no need to check within this
+ // method.
+ DCHECK_NE(dst, src);
+ DCHECK_EQ(dst.reg_class(), src.reg_class());
+ // TODO(clemensh): Handle different sizes here.
+ if (dst.is_gp()) {
+ movq(dst.gp(), src.gp());
+ } else {
+ Movsd(dst.fp(), src.fp());
+ }
}
-void LiftoffAssembler::Spill(uint32_t index, Register reg) {
- // TODO(clemensh): Handle different types here.
- movl(liftoff::GetStackSlot(index), reg);
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
+ Operand dst = liftoff::GetStackSlot(index);
+ // TODO(clemensh): Handle different sizes here.
+ if (reg.is_gp()) {
+ movq(dst, reg.gp());
+ } else {
+ Movsd(dst, reg.fp());
+ }
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- // TODO(clemensh): Handle different types here.
- movl(liftoff::GetStackSlot(index), Immediate(value.to_i32()));
+ Operand dst = liftoff::GetStackSlot(index);
+ switch (value.type()) {
+ case kWasmI32:
+ movl(dst, Immediate(value.to_i32()));
+ break;
+ case kWasmF32:
+ movl(dst, Immediate(value.to_f32_boxed().get_bits()));
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::Fill(Register reg, uint32_t index) {
- // TODO(clemensh): Handle different types here.
- movl(reg, liftoff::GetStackSlot(index));
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+ Operand src = liftoff::GetStackSlot(index);
+ // TODO(clemensh): Handle different sizes here.
+ if (reg.is_gp()) {
+ movq(reg.gp(), src);
+ } else {
+ Movsd(reg.fp(), src);
+ }
}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
@@ -176,11 +274,291 @@ COMMUTATIVE_I32_BINOP(or, or)
COMMUTATIVE_I32_BINOP(xor, xor)
// clang-format on
-#undef DEFAULT_I32_BINOP
+#undef COMMUTATIVE_I32_BINOP
+
+namespace liftoff {
+inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
+ Register lhs, Register rhs,
+ void (Assembler::*emit_shift)(Register)) {
+ // If dst is rcx, compute into the scratch register first, then move to rcx.
+ if (dst == rcx) {
+ assm->movl(kScratchRegister, lhs);
+ if (rhs != rcx) assm->movl(rcx, rhs);
+ (assm->*emit_shift)(kScratchRegister);
+ assm->movl(rcx, kScratchRegister);
+ return;
+ }
+
+ // Move rhs into rcx. If rcx is in use, move its content into the scratch
+ // register. If lhs is rcx, lhs is now the scratch register.
+ bool use_scratch = false;
+ if (rhs != rcx) {
+ use_scratch =
+ lhs == rcx || assm->cache_state()->is_used(LiftoffRegister(rcx));
+ if (use_scratch) assm->movl(kScratchRegister, rcx);
+ if (lhs == rcx) lhs = kScratchRegister;
+ assm->movl(rcx, rhs);
+ }
+
+ // Do the actual shift.
+ if (dst != lhs) assm->movl(dst, lhs);
+ (assm->*emit_shift)(dst);
+
+ // Restore rcx if needed.
+ if (use_scratch) assm->movl(rcx, kScratchRegister);
+}
+} // namespace liftoff
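liftoff::EmitShiftOperation deals with the x64 constraint that variable shift counts must live in cl (the low byte of rcx): it moves the count into rcx, parking any live rcx value in the scratch register first, and handles the case where dst itself is rcx by computing into the scratch register. The wasm semantics it implements mask the shift count to the low five bits, which is exactly what the hardware shll/sarl/shrl with a cl count do. A reference sketch of those semantics (plain C++, not V8 code):

#include <cstdint>

// Reference semantics of the i32 shifts emitted below; both wasm and the
// x86 cl-count shifts use only the low 5 bits of the count.
uint32_t I32Shl(uint32_t lhs, uint32_t rhs) { return lhs << (rhs & 31); }
uint32_t I32ShrU(uint32_t lhs, uint32_t rhs) { return lhs >> (rhs & 31); }
// Arithmetic right shift; implementation-defined before C++20 but arithmetic
// on the compilers V8 targets.
int32_t I32ShrS(int32_t lhs, uint32_t rhs) { return lhs >> (rhs & 31); }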
+
+void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shll_cl);
+}
+
+void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sarl_cl);
+}
+
+void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shrl_cl);
+}
+
+bool LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
+ testl(src, src);
+ setcc(zero, dst);
+ movzxbl(dst, dst);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
+ Label nonzero_input;
+ Label continuation;
+ testl(src, src);
+ j(not_zero, &nonzero_input, Label::kNear);
+ movl(dst, Immediate(32));
+ jmp(&continuation, Label::kNear);
+
+ bind(&nonzero_input);
+ // Get most significant bit set (MSBS).
+ bsrl(dst, src);
+ // CLZ = 31 - MSBS = MSBS ^ 31.
+ xorl(dst, Immediate(31));
+
+ bind(&continuation);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
+ Label nonzero_input;
+ Label continuation;
+ testl(src, src);
+ j(not_zero, &nonzero_input, Label::kNear);
+ movl(dst, Immediate(32));
+ jmp(&continuation, Label::kNear);
+
+ bind(&nonzero_input);
+ // Get least significant bit set, which equals number of trailing zeros.
+ bsfl(dst, src);
+
+ bind(&continuation);
+ return true;
+}
+
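The clz/ctz sequences above work around the fact that bsrl and bsfl leave their destination undefined for a zero input, hence the explicit zero check that produces 32. For a non-zero input, bsrl yields the index of the most significant set bit, so clz = 31 - index = index ^ 31, while bsfl yields the index of the least significant set bit, which is the ctz value directly. Portable reference implementations with the same results (illustrative only):

#include <cstdint>

uint32_t I32Clz(uint32_t v) {
  if (v == 0) return 32;  // the emitted zero-input path
  uint32_t n = 0;
  while ((v & 0x80000000u) == 0) { v <<= 1; ++n; }
  return n;  // == 31 - msb_index == msb_index ^ 31
}

uint32_t I32Ctz(uint32_t v) {
  if (v == 0) return 32;  // the emitted zero-input path
  uint32_t n = 0;
  while ((v & 1u) == 0) { v >>= 1; ++n; }
  return n;  // == lsb_index, what bsfl computes
}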
+bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
+ if (!CpuFeatures::IsSupported(POPCNT)) return false;
+ CpuFeatureScope scope(this, POPCNT);
+ popcntl(dst, src);
+ return true;
+}
+
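emit_i32_popcnt is the one operation here that can decline: it returns false when the POPCNT feature is unavailable, and the boolean return of the unop emitters presumably lets the caller pick a fallback lowering. For reference, the value the instruction computes (a sketch, not the fallback V8 uses):

#include <cstdint>

// Kernighan-style population count: clears one set bit per iteration.
uint32_t I32Popcnt(uint32_t v) {
  uint32_t n = 0;
  while (v != 0) {
    v &= v - 1;  // drop the lowest set bit
    ++n;
  }
  return n;
}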
+void LiftoffAssembler::emit_ptrsize_add(Register dst, Register lhs,
+ Register rhs) {
+ if (lhs != dst) {
+ leap(dst, Operand(lhs, rhs, times_1, 0));
+ } else {
+ addp(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vaddss(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ addss(dst, lhs);
+ } else {
+ if (dst != lhs) movss(dst, lhs);
+ addss(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vsubss(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ movss(kScratchDoubleReg, rhs);
+ movss(dst, lhs);
+ subss(dst, kScratchDoubleReg);
+ } else {
+ if (dst != lhs) movss(dst, lhs);
+ subss(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmulss(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ mulss(dst, lhs);
+ } else {
+ if (dst != lhs) movss(dst, lhs);
+ mulss(dst, rhs);
+ }
+}
+
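The SSE paths of the f32 binops above have to respect that addss/subss/mulss are destructive two-operand instructions (dst op= src): addition and multiplication can simply swap operands when dst aliases rhs, but subtraction must first copy rhs into the scratch register or the move of lhs into dst would clobber it. A plain C++ illustration of that aliasing hazard (not V8 code):

#include <cassert>

// Wrong when dst and rhs alias: copying lhs into dst destroys rhs.
void BadSub(float* dst, const float* lhs, const float* rhs) {
  *dst = *lhs;
  *dst -= *rhs;
}

// The role kScratchDoubleReg plays above: save rhs before overwriting dst.
void GoodSub(float* dst, const float* lhs, const float* rhs) {
  float saved_rhs = *rhs;
  *dst = *lhs;
  *dst -= saved_rhs;
}

int main() {
  float a = 5.0f, b = 2.0f;
  GoodSub(&b, &a, &b);  // dst aliases rhs
  assert(b == 3.0f);    // 5 - 2
  float c = 5.0f, d = 2.0f;
  BadSub(&d, &c, &d);
  assert(d == 0.0f);    // 5 - 5: the original rhs was lost
  return 0;
}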
+void LiftoffAssembler::emit_i32_test(Register reg) { testl(reg, reg); }
+
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
+ cmpl(lhs, rhs);
+}
+
+void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
+
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+ j(cond, label);
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code) {
+ Register limit = GetUnusedRegister(kGpReg).gp();
+ LoadAddress(limit, ExternalReference::address_of_stack_limit(isolate()));
+ cmpp(rsp, Operand(limit, 0));
+ j(below_equal, ool_code);
+}
+
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ PrepareCallCFunction(0);
+ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()), 0);
+}
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ TurboAssembler::AssertUnreachable(reason);
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
+ uint32_t src_index) {
+ switch (src.loc()) {
+ case VarState::kStack:
+ pushq(liftoff::GetStackSlot(src_index));
+ break;
+ case VarState::kRegister:
+ PushCallerFrameSlot(src.reg());
+ break;
+ case VarState::kI32Const:
+ pushq(Immediate(src.i32_const()));
+ break;
+ }
+}
+
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
+ if (reg.is_gp()) {
+ pushq(reg.gp());
+ } else {
+ subp(rsp, Immediate(kPointerSize));
+ Movsd(Operand(rsp, 0), reg.fp());
+ }
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetFirstRegSet();
+ pushq(reg.gp());
+ gp_regs.clear(reg);
+ }
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned num_fp_regs = fp_regs.GetNumRegsSet();
+ if (num_fp_regs) {
+ subp(rsp, Immediate(num_fp_regs * kStackSlotSize));
+ unsigned offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ Movsd(Operand(rsp, offset), reg.fp());
+ fp_regs.clear(reg);
+ offset += sizeof(double);
+ }
+ DCHECK_EQ(offset, num_fp_regs * sizeof(double));
+ }
+}
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned fp_offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ Movsd(reg.fp(), Operand(rsp, fp_offset));
+ fp_regs.clear(reg);
+ fp_offset += sizeof(double);
+ }
+ if (fp_offset) addp(rsp, Immediate(fp_offset));
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetLastRegSet();
+ popq(reg.gp());
+ gp_regs.clear(reg);
+ }
+}
+
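PushRegisters saves the GP registers with pushq from the first set register to the last and spills the FP registers into a block it carves out below them; PopRegisters restores the FP block first and then pops GP registers from the last set register to the first, so every pop pairs with the matching push. A small model of that LIFO pairing (illustrative only):

#include <cassert>
#include <vector>

int main() {
  std::vector<int> regs = {1, 2, 3};      // stand-ins for the GP cache registers
  std::vector<int> stack;
  for (int r : regs) stack.push_back(r);  // PushRegisters: GetFirstRegSet order
  std::vector<int> restored(regs.size());
  for (int i = static_cast<int>(regs.size()) - 1; i >= 0; --i) {
    restored[i] = stack.back();           // PopRegisters: GetLastRegSet order
    stack.pop_back();
  }
  assert(restored == regs);               // every register gets its value back
  return 0;
}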
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
+ ret(static_cast<int>(num_stack_slots * kPointerSize));
+}
+
+void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+ for (size_t param = 0; param < num_params; ++param) {
+ pushq(args[param]);
+ }
+ movq(liftoff::kCCallLastArgAddrReg, rsp);
+ PrepareCallCFunction(num_params);
+}
+
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
+ uint32_t num_params) {
+ int offset = kPointerSize * static_cast<int>(num_params - 1 - param_idx);
+ leaq(dst, Operand(liftoff::kCCallLastArgAddrReg, offset));
+}
+
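PrepareCCall pushes the C call arguments in parameter order and records rsp in liftoff::kCCallLastArgAddrReg, so parameter i ends up kPointerSize * (num_params - 1 - i) bytes above that address; SetCCallRegParamAddr only has to materialize that address with lea. A sketch of the offset arithmetic (the helper name is hypothetical):

#include <cstdint>

constexpr int kPointerSize = 8;  // x64

constexpr int CCallParamOffset(uint32_t param_idx, uint32_t num_params) {
  return kPointerSize * static_cast<int>(num_params - 1 - param_idx);
}

// With three pushed parameters, parameter 0 was pushed first and is furthest
// from the recorded rsp; the last pushed parameter sits right at it.
static_assert(CCallParamOffset(0, 3) == 16, "");
static_assert(CCallParamOffset(2, 3) == 0, "");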
+void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
+ uint32_t param_idx,
+ uint32_t num_params) {
+ // On x64, all C call arguments fit in registers.
+ UNREACHABLE();
+}
+
+void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+ CallCFunction(ext_ref, static_cast<int>(num_params));
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ near_call(addr, RelocInfo::WASM_CALL);
+}
+
+void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
+ // Set context to zero.
+ xorp(rsi, rsi);
+ CallRuntimeDelayed(zone, fid);
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ subp(rsp, Immediate(size));
+ movp(addr, rsp);
+}
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {
- testl(reg, reg);
- j(zero, label);
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ addp(rsp, Immediate(size));
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 9c0fa268f3..242130b035 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -19,7 +19,6 @@ namespace v8 {
namespace internal {
namespace wasm {
-#if DEBUG
#define TRACE(...) \
do { \
if (FLAG_trace_wasm_decoder) PrintF(__VA_ARGS__); \
@@ -28,10 +27,6 @@ namespace wasm {
do { \
if (FLAG_trace_wasm_decoder && (cond)) PrintF(__VA_ARGS__); \
} while (false)
-#else
-#define TRACE(...)
-#define TRACE_IF(...)
-#endif
// A {DecodeResult} only stores the failure / success status, but no data. Thus
// we use {nullptr_t} as data value, such that the only valid data stored in
@@ -50,16 +45,21 @@ class Decoder {
enum TraceFlag : bool { kTrace = true, kNoTrace = false };
Decoder(const byte* start, const byte* end, uint32_t buffer_offset = 0)
- : start_(start), pc_(start), end_(end), buffer_offset_(buffer_offset) {}
+ : Decoder(start, start, end, buffer_offset) {}
Decoder(const byte* start, const byte* pc, const byte* end,
uint32_t buffer_offset = 0)
- : start_(start), pc_(pc), end_(end), buffer_offset_(buffer_offset) {}
+ : start_(start), pc_(pc), end_(end), buffer_offset_(buffer_offset) {
+ DCHECK_LE(start, pc);
+ DCHECK_LE(pc, end);
+ DCHECK_EQ(static_cast<uint32_t>(end - start), end - start);
+ }
virtual ~Decoder() {}
inline bool validate_size(const byte* pc, uint32_t length, const char* msg) {
DCHECK_LE(start_, pc);
- if (V8_UNLIKELY(pc + length > end_)) {
+ DCHECK_LE(pc, end_);
+ if (V8_UNLIKELY(length > static_cast<uint32_t>(end_ - pc))) {
error(pc, msg);
return false;
}
@@ -166,16 +166,12 @@ class Decoder {
// Check that at least {size} bytes exist between {pc_} and {end_}.
bool checkAvailable(uint32_t size) {
- uintptr_t pc_overflow_value = std::numeric_limits<uintptr_t>::max() - size;
- if ((uintptr_t)pc_ > pc_overflow_value) {
- errorf(pc_, "reading %u bytes would underflow/overflow", size);
- return false;
- } else if (pc_ < start_ || end_ < (pc_ + size)) {
+ DCHECK_LE(pc_, end_);
+ if (V8_UNLIKELY(size > static_cast<uint32_t>(end_ - pc_))) {
errorf(pc_, "expected %u bytes, fell off end", size);
return false;
- } else {
- return true;
}
+ return true;
}
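The rewritten checkAvailable (and validate_size above it) compares the requested size against the remaining bytes end_ - pc_ instead of forming pc_ + size, so the old pc_overflow_value guard against pointer-arithmetic overflow is no longer needed: subtracting two pointers into the same buffer cannot overflow, and one unsigned comparison covers both the overflow and the fell-off-end cases. A standalone sketch of the pattern (assumes, as the decoder's DCHECKs do, that pc <= end and the buffer size fits in 32 bits):

#include <cstdint>

bool CheckAvailable(const uint8_t* pc, const uint8_t* end, uint32_t size) {
  // Safe: pc <= end, so end - pc is a non-negative count of remaining bytes.
  return size <= static_cast<uint32_t>(end - pc);
}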
void error(const char* msg) { errorf(pc_, "%s", msg); }
@@ -232,6 +228,8 @@ class Decoder {
// Resets the boundaries of this decoder.
void Reset(const byte* start, const byte* end, uint32_t buffer_offset = 0) {
+ DCHECK_LE(start, end);
+ DCHECK_EQ(static_cast<uint32_t>(end - start), end - start);
start_ = start;
pc_ = start;
end_ = end;
@@ -316,7 +314,8 @@ class Decoder {
static_assert(byte_index < kMaxLength, "invalid template instantiation");
constexpr int shift = byte_index * 7;
constexpr bool is_last_byte = byte_index == kMaxLength - 1;
- const bool at_end = validate && pc >= end_;
+ DCHECK_LE(pc, end_);
+ const bool at_end = validate && pc == end_;
byte b = 0;
if (!at_end) {
DCHECK_LT(pc, end_);
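The read_leb change is in the templated byte-at-a-time LEB128 reader: each step consumes one byte and shifts its seven payload bits by byte_index * 7, and since pc never advances past end_ the at-end test can be the exact comparison pc == end_. For orientation, a minimal unsigned LEB128 reader with the same shape (illustrative, not the V8 template):

#include <cassert>
#include <cstdint>

uint32_t ReadU32LEB(const uint8_t* pc, const uint8_t* end, unsigned* length) {
  uint32_t result = 0;
  unsigned shift = 0;
  const uint8_t* p = pc;
  while (p < end && shift < 35) {  // at most 5 bytes for a u32
    uint8_t b = *p++;
    result |= static_cast<uint32_t>(b & 0x7f) << shift;
    shift += 7;
    if ((b & 0x80) == 0) break;    // high bit clear: last byte
  }
  *length = static_cast<unsigned>(p - pc);
  return result;
}

int main() {
  const uint8_t bytes[] = {0xE5, 0x8E, 0x26};  // 624485 encoded in LEB128
  unsigned len = 0;
  assert(ReadU32LEB(bytes, bytes + sizeof bytes, &len) == 624485u && len == 3);
  return 0;
}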
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index ffbf85cde8..04d918b0a4 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -22,14 +22,12 @@ namespace wasm {
struct WasmGlobal;
struct WasmException;
-#if DEBUG
#define TRACE(...) \
do { \
if (FLAG_trace_wasm_decoder) PrintF(__VA_ARGS__); \
} while (false)
-#else
-#define TRACE(...)
-#endif
+
+#define TRACE_INST_FORMAT " @%-8d #%-20s|"
// Return the evaluation of `condition` if validate==true, DCHECK that it's
// true and always return true otherwise.
@@ -250,10 +248,11 @@ struct CallIndirectOperand {
uint32_t table_index;
uint32_t index;
FunctionSig* sig = nullptr;
- unsigned length;
+ unsigned length = 0;
inline CallIndirectOperand(Decoder* decoder, const byte* pc) {
unsigned len = 0;
index = decoder->read_u32v<validate>(pc + 1, &len, "signature index");
+ if (!VALIDATE(decoder->ok())) return;
table_index = decoder->read_u8<validate>(pc + 1 + len, "table index");
if (!VALIDATE(table_index == 0)) {
decoder->errorf(pc + 1 + len, "expected table index 0, found %u",
@@ -342,7 +341,7 @@ template <Decoder::ValidateFlag validate>
struct MemoryAccessOperand {
uint32_t alignment;
uint32_t offset;
- unsigned length;
+ unsigned length = 0;
inline MemoryAccessOperand(Decoder* decoder, const byte* pc,
uint32_t max_alignment) {
unsigned alignment_length;
@@ -354,6 +353,7 @@ struct MemoryAccessOperand {
"actual alignment is %u",
max_alignment, alignment);
}
+ if (!VALIDATE(decoder->ok())) return;
unsigned offset_length;
offset = decoder->read_u32v<validate>(pc + 1 + alignment_length,
&offset_length, "offset");
@@ -386,11 +386,12 @@ struct SimdShiftOperand {
// Operand for SIMD S8x16 shuffle operations.
template <Decoder::ValidateFlag validate>
struct Simd8x16ShuffleOperand {
- uint8_t shuffle[kSimd128Size];
+ uint8_t shuffle[kSimd128Size] = {0};
inline Simd8x16ShuffleOperand(Decoder* decoder, const byte* pc) {
for (uint32_t i = 0; i < kSimd128Size; ++i) {
shuffle[i] = decoder->read_u8<validate>(pc + 2 + i, "shuffle");
+ if (!VALIDATE(decoder->ok())) return;
}
}
};
@@ -550,6 +551,7 @@ struct ControlWithNamedConstructors : public ControlBase<Value> {
F(StartFunctionBody, Control* block) \
F(FinishFunction) \
F(OnFirstError) \
+ F(NextInstruction, WasmOpcode) \
/* Control: */ \
F(Block, Control* block) \
F(Loop, Control* block) \
@@ -582,12 +584,10 @@ struct ControlWithNamedConstructors : public ControlBase<Value> {
F(BrIf, const Value& cond, Control* target) \
F(BrTable, const BranchTableOperand<validate>& operand, const Value& key) \
F(Else, Control* if_block) \
- F(LoadMem, ValueType type, MachineType mem_type, \
- const MemoryAccessOperand<validate>& operand, const Value& index, \
- Value* result) \
- F(StoreMem, ValueType type, MachineType mem_type, \
- const MemoryAccessOperand<validate>& operand, const Value& index, \
- const Value& value) \
+ F(LoadMem, LoadType type, const MemoryAccessOperand<validate>& operand, \
+ const Value& index, Value* result) \
+ F(StoreMem, StoreType type, const MemoryAccessOperand<validate>& operand, \
+ const Value& index, const Value& value) \
F(CurrentMemoryPages, Value* result) \
F(GrowMemory, const Value& value, Value* result) \
F(CallDirect, const CallFunctionOperand<validate>& operand, \
@@ -974,6 +974,8 @@ class WasmDecoder : public Decoder {
return 5;
case kExprF64Const:
return 9;
+ case kNumericPrefix:
+ return 2;
case kSimdPrefix: {
byte simd_index = decoder->read_u8<validate>(pc + 1, "simd_index");
WasmOpcode opcode =
@@ -1026,9 +1028,6 @@ class WasmDecoder : public Decoder {
std::pair<uint32_t, uint32_t> StackEffect(const byte* pc) {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
- if (WasmOpcodes::IsPrefixOpcode(opcode)) {
- opcode = static_cast<WasmOpcode>(opcode << 8 | *(pc + 1));
- }
// Handle "simple" opcodes with a fixed signature first.
FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (!sig) sig = WasmOpcodes::AsmjsSignature(opcode);
@@ -1039,10 +1038,8 @@ class WasmDecoder : public Decoder {
switch (opcode) {
case kExprSelect:
return {3, 1};
- case kExprS128StoreMem:
FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
return {2, 0};
- case kExprS128LoadMem:
FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
case kExprTeeLocal:
case kExprGrowMemory:
@@ -1083,6 +1080,24 @@ class WasmDecoder : public Decoder {
case kExprReturn:
case kExprUnreachable:
return {0, 0};
+ case kNumericPrefix:
+ case kAtomicPrefix:
+ case kSimdPrefix: {
+ opcode = static_cast<WasmOpcode>(opcode << 8 | *(pc + 1));
+ switch (opcode) {
+ case kExprI32AtomicStore:
+ case kExprI32AtomicStore8U:
+ case kExprI32AtomicStore16U:
+ case kExprS128StoreMem:
+ return {2, 0};
+ default: {
+ sig = WasmOpcodes::Signature(opcode);
+ if (sig) {
+ return {sig->parameter_count(), sig->return_count()};
+ }
+ }
+ }
+ }
default:
V8_Fatal(__FILE__, __LINE__, "unimplemented opcode: %x (%s)", opcode,
WasmOpcodes::OpcodeName(opcode));
@@ -1142,9 +1157,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DCHECK(stack_.empty());
DCHECK(control_.empty());
- if (FLAG_wasm_code_fuzzer_gen_test) {
- PrintRawWasmCode(this->start_, this->end_);
- }
base::ElapsedTimer decode_timer;
if (FLAG_trace_wasm_decode_time) {
decode_timer.Start();
@@ -1273,6 +1285,32 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return true;
}
+ class TraceLine {
+ public:
+ static constexpr int kMaxLen = 512;
+ ~TraceLine() {
+ if (!FLAG_trace_wasm_decoder) return;
+ PrintF("%.*s\n", len_, buffer_);
+ }
+
+ // Appends a formatted string.
+ PRINTF_FORMAT(2, 3)
+ void Append(const char* format, ...) {
+ if (!FLAG_trace_wasm_decoder) return;
+ va_list va_args;
+ va_start(va_args, format);
+ size_t remaining_len = kMaxLen - len_;
+ Vector<char> remaining_msg_space(buffer_ + len_, remaining_len);
+ int len = VSNPrintF(remaining_msg_space, format, va_args);
+ va_end(va_args);
+ len_ += len < 0 ? remaining_len : len;
+ }
+
+ private:
+ char buffer_[kMaxLen];
+ int len_ = 0;
+ };
+
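TraceLine replaces the direct PrintF calls in the tracing code below: the pieces of one instruction's trace are appended into a fixed buffer and the whole line is flushed in the destructor, so a single line per decoded instruction is printed even though the TRACE_PART calls are scattered across the decode loop. A minimal analogue of that RAII pattern (illustrative only, not the V8 class):

#include <cstdarg>
#include <cstdio>

class LineBuffer {
 public:
  ~LineBuffer() { std::printf("%.*s\n", len_, buf_); }

  void Append(const char* fmt, ...) {
    va_list args;
    va_start(args, fmt);
    int remaining = static_cast<int>(sizeof(buf_)) - len_;
    int n = std::vsnprintf(buf_ + len_, remaining, fmt, args);
    va_end(args);
    if (n < 0) return;                            // formatting error: drop piece
    len_ += (n < remaining) ? n : remaining - 1;  // clamp on truncation
  }

 private:
  char buf_[512];
  int len_ = 0;
};

int main() {
  LineBuffer line;
  line.Append(" @%-8d #%-20s|", 0, "i32.add");
  line.Append(" %c@%d", 'i', 3);
  return 0;  // destructor prints the assembled line
}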
// Decodes the body of a function.
void DecodeFunctionBody() {
TRACE("wasm-decode %p...%p (module+%u, %d bytes)\n",
@@ -1294,11 +1332,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
while (this->pc_ < this->end_) { // decoding loop.
unsigned len = 1;
WasmOpcode opcode = static_cast<WasmOpcode>(*this->pc_);
+
+ CALL_INTERFACE_IF_REACHABLE(NextInstruction, opcode);
+
#if DEBUG
- if (FLAG_trace_wasm_decoder && !WasmOpcodes::IsPrefixOpcode(opcode)) {
- TRACE(" @%-8d #%-20s|", startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
+ TraceLine trace_msg;
+#define TRACE_PART(...) trace_msg.Append(__VA_ARGS__)
+ if (!WasmOpcodes::IsPrefixOpcode(opcode)) {
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(opcode));
}
+#else
+#define TRACE_PART(...)
#endif
FunctionSig* sig = WasmOpcodes::Signature(opcode);
@@ -1430,8 +1475,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->error(this->pc_, "else already present for if");
break;
}
- c->kind = kControlIfElse;
FallThruTo(c);
+ c->kind = kControlIfElse;
CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
PushMergeValues(c, &c->start_merge);
c->reachability = control_at(1)->innerReachability();
@@ -1450,6 +1495,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (c->is_onearmed_if()) {
// Emulate empty else arm.
FallThruTo(c);
+ if (this->failed()) break;
CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
PushMergeValues(c, &c->start_merge);
c->reachability = control_at(1)->innerReachability();
@@ -1467,10 +1513,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
last_end_found_ = true;
// The result of the block is the return value.
- TRACE(" @%-8d #xx:%-20s|", startrel(this->pc_),
- "(implicit) return");
+ TRACE_PART("\n" TRACE_INST_FORMAT, startrel(this->pc_),
+ "(implicit) return");
DoReturn(c, true);
- TRACE("\n");
}
PopControl(c);
@@ -1630,73 +1675,73 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprI32LoadMem8S:
- len = DecodeLoadMem(kWasmI32, MachineType::Int8());
+ len = 1 + DecodeLoadMem(LoadType::kI32Load8S);
break;
case kExprI32LoadMem8U:
- len = DecodeLoadMem(kWasmI32, MachineType::Uint8());
+ len = 1 + DecodeLoadMem(LoadType::kI32Load8U);
break;
case kExprI32LoadMem16S:
- len = DecodeLoadMem(kWasmI32, MachineType::Int16());
+ len = 1 + DecodeLoadMem(LoadType::kI32Load16S);
break;
case kExprI32LoadMem16U:
- len = DecodeLoadMem(kWasmI32, MachineType::Uint16());
+ len = 1 + DecodeLoadMem(LoadType::kI32Load16U);
break;
case kExprI32LoadMem:
- len = DecodeLoadMem(kWasmI32, MachineType::Int32());
+ len = 1 + DecodeLoadMem(LoadType::kI32Load);
break;
case kExprI64LoadMem8S:
- len = DecodeLoadMem(kWasmI64, MachineType::Int8());
+ len = 1 + DecodeLoadMem(LoadType::kI64Load8S);
break;
case kExprI64LoadMem8U:
- len = DecodeLoadMem(kWasmI64, MachineType::Uint8());
+ len = 1 + DecodeLoadMem(LoadType::kI64Load8U);
break;
case kExprI64LoadMem16S:
- len = DecodeLoadMem(kWasmI64, MachineType::Int16());
+ len = 1 + DecodeLoadMem(LoadType::kI64Load16S);
break;
case kExprI64LoadMem16U:
- len = DecodeLoadMem(kWasmI64, MachineType::Uint16());
+ len = 1 + DecodeLoadMem(LoadType::kI64Load16U);
break;
case kExprI64LoadMem32S:
- len = DecodeLoadMem(kWasmI64, MachineType::Int32());
+ len = 1 + DecodeLoadMem(LoadType::kI64Load32S);
break;
case kExprI64LoadMem32U:
- len = DecodeLoadMem(kWasmI64, MachineType::Uint32());
+ len = 1 + DecodeLoadMem(LoadType::kI64Load32U);
break;
case kExprI64LoadMem:
- len = DecodeLoadMem(kWasmI64, MachineType::Int64());
+ len = 1 + DecodeLoadMem(LoadType::kI64Load);
break;
case kExprF32LoadMem:
- len = DecodeLoadMem(kWasmF32, MachineType::Float32());
+ len = 1 + DecodeLoadMem(LoadType::kF32Load);
break;
case kExprF64LoadMem:
- len = DecodeLoadMem(kWasmF64, MachineType::Float64());
+ len = 1 + DecodeLoadMem(LoadType::kF64Load);
break;
case kExprI32StoreMem8:
- len = DecodeStoreMem(kWasmI32, MachineType::Int8());
+ len = 1 + DecodeStoreMem(StoreType::kI32Store8);
break;
case kExprI32StoreMem16:
- len = DecodeStoreMem(kWasmI32, MachineType::Int16());
+ len = 1 + DecodeStoreMem(StoreType::kI32Store16);
break;
case kExprI32StoreMem:
- len = DecodeStoreMem(kWasmI32, MachineType::Int32());
+ len = 1 + DecodeStoreMem(StoreType::kI32Store);
break;
case kExprI64StoreMem8:
- len = DecodeStoreMem(kWasmI64, MachineType::Int8());
+ len = 1 + DecodeStoreMem(StoreType::kI64Store8);
break;
case kExprI64StoreMem16:
- len = DecodeStoreMem(kWasmI64, MachineType::Int16());
+ len = 1 + DecodeStoreMem(StoreType::kI64Store16);
break;
case kExprI64StoreMem32:
- len = DecodeStoreMem(kWasmI64, MachineType::Int32());
+ len = 1 + DecodeStoreMem(StoreType::kI64Store32);
break;
case kExprI64StoreMem:
- len = DecodeStoreMem(kWasmI64, MachineType::Int64());
+ len = 1 + DecodeStoreMem(StoreType::kI64Store);
break;
case kExprF32StoreMem:
- len = DecodeStoreMem(kWasmF32, MachineType::Float32());
+ len = 1 + DecodeStoreMem(StoreType::kF32Store);
break;
case kExprF64StoreMem:
- len = DecodeStoreMem(kWasmF64, MachineType::Float64());
+ len = 1 + DecodeStoreMem(StoreType::kF64Store);
break;
case kExprGrowMemory: {
if (!CheckHasMemory()) break;
@@ -1742,14 +1787,31 @@ class WasmFullDecoder : public WasmDecoder<validate> {
args_.data(), returns);
break;
}
+ case kNumericPrefix: {
+ CHECK_PROTOTYPE_OPCODE(sat_f2i_conversions);
+ ++len;
+ byte numeric_index = this->template read_u8<validate>(
+ this->pc_ + 1, "numeric index");
+ opcode = static_cast<WasmOpcode>(opcode << 8 | numeric_index);
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(opcode));
+ sig = WasmOpcodes::Signature(opcode);
+ if (sig == nullptr) {
+ this->errorf(this->pc_, "Unrecognized numeric opcode: %x\n",
+ opcode);
+ return;
+ }
+ BuildSimpleOperator(opcode, sig);
+ break;
+ }
case kSimdPrefix: {
CHECK_PROTOTYPE_OPCODE(simd);
len++;
byte simd_index =
this->template read_u8<validate>(this->pc_ + 1, "simd index");
opcode = static_cast<WasmOpcode>(opcode << 8 | simd_index);
- TRACE(" @%-4d #%-20s|", startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(opcode));
len += DecodeSimdOpcode(opcode);
break;
}
@@ -1760,8 +1822,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
byte atomic_index =
this->template read_u8<validate>(this->pc_ + 1, "atomic index");
opcode = static_cast<WasmOpcode>(opcode << 8 | atomic_index);
- TRACE(" @%-4d #%-20s|", startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(opcode));
len += DecodeAtomicOpcode(opcode);
break;
}
@@ -1782,62 +1844,64 @@ class WasmFullDecoder : public WasmDecoder<validate> {
#if DEBUG
if (FLAG_trace_wasm_decoder) {
- PrintF(" ");
+ TRACE_PART(" ");
for (Control& c : control_) {
switch (c.kind) {
case kControlIf:
- PrintF("I");
+ TRACE_PART("I");
break;
case kControlBlock:
- PrintF("B");
+ TRACE_PART("B");
break;
case kControlLoop:
- PrintF("L");
+ TRACE_PART("L");
break;
case kControlTry:
- PrintF("T");
+ TRACE_PART("T");
break;
default:
break;
}
- if (c.start_merge.arity) PrintF("%u-", c.start_merge.arity);
- PrintF("%u", c.end_merge.arity);
- if (!c.reachable()) PrintF("%c", c.unreachable() ? '*' : '#');
+ if (c.start_merge.arity) TRACE_PART("%u-", c.start_merge.arity);
+ TRACE_PART("%u", c.end_merge.arity);
+ if (!c.reachable()) TRACE_PART("%c", c.unreachable() ? '*' : '#');
}
- PrintF(" | ");
+ TRACE_PART(" | ");
for (size_t i = 0; i < stack_.size(); ++i) {
auto& val = stack_[i];
WasmOpcode opcode = static_cast<WasmOpcode>(*val.pc);
if (WasmOpcodes::IsPrefixOpcode(opcode)) {
opcode = static_cast<WasmOpcode>(opcode << 8 | *(val.pc + 1));
}
- PrintF(" %c@%d:%s", WasmOpcodes::ShortNameOf(val.type),
- static_cast<int>(val.pc - this->start_),
- WasmOpcodes::OpcodeName(opcode));
+ TRACE_PART(" %c@%d:%s", WasmOpcodes::ShortNameOf(val.type),
+ static_cast<int>(val.pc - this->start_),
+ WasmOpcodes::OpcodeName(opcode));
+ // If the decoder failed, don't try to decode the operands, as this
+ // can trigger a DCHECK failure.
+ if (this->failed()) continue;
switch (opcode) {
case kExprI32Const: {
- ImmI32Operand<validate> operand(this, val.pc);
- PrintF("[%d]", operand.value);
+ ImmI32Operand<Decoder::kNoValidate> operand(this, val.pc);
+ TRACE_PART("[%d]", operand.value);
break;
}
case kExprGetLocal:
case kExprSetLocal:
case kExprTeeLocal: {
- LocalIndexOperand<Decoder::kValidate> operand(this, val.pc);
- PrintF("[%u]", operand.index);
+ LocalIndexOperand<Decoder::kNoValidate> operand(this, val.pc);
+ TRACE_PART("[%u]", operand.index);
break;
}
case kExprGetGlobal:
case kExprSetGlobal: {
- GlobalIndexOperand<validate> operand(this, val.pc);
- PrintF("[%u]", operand.index);
+ GlobalIndexOperand<Decoder::kNoValidate> operand(this, val.pc);
+ TRACE_PART("[%u]", operand.index);
break;
}
default:
break;
}
}
- PrintF("\n");
}
#endif
this->pc_ += len;
@@ -1941,49 +2005,23 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
- int DecodeLoadMem(ValueType type, MachineType mem_type) {
+ int DecodeLoadMem(LoadType type, int prefix_len = 0) {
if (!CheckHasMemory()) return 0;
- MemoryAccessOperand<validate> operand(
- this, this->pc_, ElementSizeLog2Of(mem_type.representation()));
-
- auto index = Pop(0, kWasmI32);
- auto* result = Push(type);
- CALL_INTERFACE_IF_REACHABLE(LoadMem, type, mem_type, operand, index,
- result);
- return 1 + operand.length;
- }
-
- int DecodeStoreMem(ValueType type, MachineType mem_type) {
- if (!CheckHasMemory()) return 0;
- MemoryAccessOperand<validate> operand(
- this, this->pc_, ElementSizeLog2Of(mem_type.representation()));
- auto value = Pop(1, type);
- auto index = Pop(0, kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(StoreMem, type, mem_type, operand, index,
- value);
- return 1 + operand.length;
- }
-
- int DecodePrefixedLoadMem(ValueType type, MachineType mem_type) {
- if (!CheckHasMemory()) return 0;
- MemoryAccessOperand<validate> operand(
- this, this->pc_ + 1, ElementSizeLog2Of(mem_type.representation()));
-
+ MemoryAccessOperand<validate> operand(this, this->pc_ + prefix_len,
+ type.size_log_2());
auto index = Pop(0, kWasmI32);
- auto* result = Push(type);
- CALL_INTERFACE_IF_REACHABLE(LoadMem, type, mem_type, operand, index,
- result);
+ auto* result = Push(type.value_type());
+ CALL_INTERFACE_IF_REACHABLE(LoadMem, type, operand, index, result);
return operand.length;
}
- int DecodePrefixedStoreMem(ValueType type, MachineType mem_type) {
+ int DecodeStoreMem(StoreType store, int prefix_len = 0) {
if (!CheckHasMemory()) return 0;
- MemoryAccessOperand<validate> operand(
- this, this->pc_ + 1, ElementSizeLog2Of(mem_type.representation()));
- auto value = Pop(1, type);
+ MemoryAccessOperand<validate> operand(this, this->pc_ + prefix_len,
+ store.size_log_2());
+ auto value = Pop(1, store.value_type());
auto index = Pop(0, kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(StoreMem, type, mem_type, operand, index,
- value);
+ CALL_INTERFACE_IF_REACHABLE(StoreMem, store, operand, index, value);
return operand.length;
}
@@ -2073,10 +2111,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprS128LoadMem:
- len = DecodePrefixedLoadMem(kWasmS128, MachineType::Simd128());
+ len = DecodeLoadMem(LoadType::kS128Load, 1);
break;
case kExprS128StoreMem:
- len = DecodePrefixedStoreMem(kWasmS128, MachineType::Simd128());
+ len = DecodeStoreMem(StoreType::kS128Store, 1);
break;
default: {
FunctionSig* sig = WasmOpcodes::Signature(opcode);
@@ -2347,6 +2385,7 @@ class EmptyInterface {
};
#undef TRACE
+#undef TRACE_INST_FORMAT
#undef VALIDATE
#undef CHECK_PROTOTYPE_OPCODE
#undef OPCODE_ERROR
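Note on the tracing refactor in the hunks above: the decoder now assembles each instruction's trace line piecewise via TRACE_PART instead of calling PrintF directly, so output is only produced under --trace-wasm-decoder and the newline is emitted once per instruction. A minimal sketch of such a macro pair, reusing the FLAG_trace_wasm_decoder flag and PrintF helper already visible in these hunks (the real definitions live outside this diff), could look like:

#define TRACE_INST_FORMAT "  @%-4d #%-20s|"
#define TRACE_PART(...)                                \
  do {                                                 \
    if (FLAG_trace_wasm_decoder) PrintF(__VA_ARGS__);  \
  } while (false)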
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index e9130f001d..57ee78f91c 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -142,11 +142,11 @@ class WasmGraphBuildingInterface {
block->end_env = break_env;
}
- void FinishFunction(Decoder* decoder) {
- builder_->PatchInStackCheckIfNeeded();
- }
+ void FinishFunction(Decoder*) { builder_->PatchInStackCheckIfNeeded(); }
+
+ void OnFirstError(Decoder*) {}
- void OnFirstError(Decoder* decoder) {}
+ void NextInstruction(Decoder*, WasmOpcode) {}
void Block(Decoder* decoder, Control* block) {
// The break environment is the outer environment.
@@ -215,8 +215,8 @@ class WasmGraphBuildingInterface {
void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig* sig,
const Value& lhs, const Value& rhs, Value* result) {
- result->node =
- BUILD(Binop, opcode, lhs.node, rhs.node, decoder->position());
+ auto node = BUILD(Binop, opcode, lhs.node, rhs.node, decoder->position());
+ if (result) result->node = node;
}
void I32Const(Decoder* decoder, Value* result, int32_t value) {
@@ -340,18 +340,20 @@ class WasmGraphBuildingInterface {
SetEnv(if_block->false_env);
}
- void LoadMem(Decoder* decoder, ValueType type, MachineType mem_type,
+ void LoadMem(Decoder* decoder, LoadType type,
const MemoryAccessOperand<validate>& operand, const Value& index,
Value* result) {
- result->node = BUILD(LoadMem, type, mem_type, index.node, operand.offset,
- operand.alignment, decoder->position());
+ result->node =
+ BUILD(LoadMem, type.value_type(), type.mem_type(), index.node,
+ operand.offset, operand.alignment, decoder->position());
}
- void StoreMem(Decoder* decoder, ValueType type, MachineType mem_type,
+ void StoreMem(Decoder* decoder, StoreType type,
const MemoryAccessOperand<validate>& operand,
const Value& index, const Value& value) {
- BUILD(StoreMem, mem_type, index.node, operand.offset, operand.alignment,
- value.node, decoder->position(), type);
+ BUILD(StoreMem, type.mem_rep(), index.node, operand.offset,
+ operand.alignment, value.node, decoder->position(),
+ type.value_type());
}
void CurrentMemoryPages(Decoder* decoder, Value* result) {
@@ -729,13 +731,12 @@ class WasmGraphBuildingInterface {
return loop_body_env;
}
- // Create a complete copy of the {from}.
+ // Create a complete copy of {from}.
SsaEnv* Split(Decoder* decoder, SsaEnv* from) {
DCHECK_NOT_NULL(from);
SsaEnv* result =
reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
- // The '+ 2' here is to accommodate for mem_size and mem_start nodes.
- size_t size = sizeof(TFNode*) * (decoder->NumLocals());
+ size_t size = sizeof(TFNode*) * decoder->NumLocals();
result->control = from->control;
result->effect = from->effect;
@@ -878,7 +879,8 @@ std::pair<uint32_t, uint32_t> StackEffect(const WasmModule* module,
void PrintRawWasmCode(const byte* start, const byte* end) {
AccountingAllocator allocator;
- PrintRawWasmCode(&allocator, FunctionBodyForTesting(start, end), nullptr);
+ PrintRawWasmCode(&allocator, FunctionBody{nullptr, 0, start, end}, nullptr,
+ kPrintLocals);
}
namespace {
@@ -897,7 +899,8 @@ const char* RawOpcodeName(WasmOpcode opcode) {
} // namespace
bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
- const wasm::WasmModule* module) {
+ const wasm::WasmModule* module,
+ PrintLocals print_locals) {
OFStream os(stdout);
Zone zone(allocator, ZONE_NAME);
WasmDecoder<Decoder::kNoValidate> decoder(module, body.sig, body.start,
@@ -913,7 +916,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
// Print the local declarations.
BodyLocalDecls decls(&zone);
BytecodeIterator i(body.start, body.end, &decls);
- if (body.start != i.pc() && !FLAG_wasm_code_fuzzer_gen_test) {
+ if (body.start != i.pc() && print_locals == kPrintLocals) {
os << "// locals: ";
if (!decls.type_list.empty()) {
ValueType type = decls.type_list[0];
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 8df1c8a09e..50eb2295c9 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -34,12 +34,11 @@ struct FunctionBody {
uint32_t offset; // offset in the module bytes, for error reporting
const byte* start; // start of the function body
const byte* end; // end of the function body
-};
-static inline FunctionBody FunctionBodyForTesting(const byte* start,
- const byte* end) {
- return {nullptr, 0, start, end};
-}
+ FunctionBody(FunctionSig* sig, uint32_t offset, const byte* start,
+ const byte* end)
+ : sig(sig), offset(offset), start(start), end(end) {}
+};
V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const wasm::WasmModule* module,
@@ -54,8 +53,10 @@ DecodeResult VerifyWasmCodeWithStats(AccountingAllocator* allocator,
DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
FunctionBody& body);
+enum PrintLocals { kPrintLocals, kOmitLocals };
+V8_EXPORT_PRIVATE
bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
- const wasm::WasmModule* module);
+ const wasm::WasmModule* module, PrintLocals print_locals);
// A simplified form of AST printing, e.g. from a debugger.
void PrintRawWasmCode(const byte* start, const byte* end);
@@ -63,14 +64,14 @@ void PrintRawWasmCode(const byte* start, const byte* end);
inline DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const WasmModule* module, FunctionSig* sig,
const byte* start, const byte* end) {
- FunctionBody body = {sig, 0, start, end};
+ FunctionBody body(sig, 0, start, end);
return VerifyWasmCode(allocator, module, body);
}
inline DecodeResult BuildTFGraph(AccountingAllocator* allocator,
TFBuilder* builder, FunctionSig* sig,
const byte* start, const byte* end) {
- FunctionBody body = {sig, 0, start, end};
+ FunctionBody body(sig, 0, start, end);
return BuildTFGraph(allocator, builder, body);
}
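With the FunctionBody constructor replacing FunctionBodyForTesting and PrintRawWasmCode taking an explicit PrintLocals argument, an updated caller would look roughly like this sketch (allocator, sig, code_start and code_end are illustrative locals, not part of this diff):

AccountingAllocator allocator;
FunctionBody body(sig, 0, code_start, code_end);
bool ok = PrintRawWasmCode(&allocator, body, module, kPrintLocals);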
diff --git a/deps/v8/src/wasm/memory-tracing.cc b/deps/v8/src/wasm/memory-tracing.cc
index d6e7891fc0..75a790db50 100644
--- a/deps/v8/src/wasm/memory-tracing.cc
+++ b/deps/v8/src/wasm/memory-tracing.cc
@@ -8,18 +8,18 @@
namespace v8 {
namespace internal {
-namespace tracing {
+namespace wasm {
-void TraceMemoryOperation(ExecutionEngine engine, bool is_store,
- MachineRepresentation rep, uint32_t addr,
+void TraceMemoryOperation(ExecutionEngine engine, const MemoryTracingInfo* info,
int func_index, int position, uint8_t* mem_start) {
EmbeddedVector<char, 64> value;
- switch (rep) {
-#define TRACE_TYPE(rep, str, format, ctype1, ctype2) \
- case MachineRepresentation::rep: \
- SNPrintF(value, str ":" format, \
- ReadLittleEndianValue<ctype1>(mem_start + addr), \
- ReadLittleEndianValue<ctype2>(mem_start + addr)); \
+ auto mem_rep = static_cast<MachineRepresentation>(info->mem_rep);
+ switch (mem_rep) {
+#define TRACE_TYPE(rep, str, format, ctype1, ctype2) \
+ case MachineRepresentation::rep: \
+ SNPrintF(value, str ":" format, \
+ ReadLittleEndianValue<ctype1>(mem_start + info->address), \
+ ReadLittleEndianValue<ctype2>(mem_start + info->address)); \
break;
TRACE_TYPE(kWord8, " i8", "%d / %02x", uint8_t, uint8_t)
TRACE_TYPE(kWord16, "i16", "%d / %04x", uint16_t, uint16_t)
@@ -33,17 +33,20 @@ void TraceMemoryOperation(ExecutionEngine engine, bool is_store,
}
char eng_c = '?';
switch (engine) {
- case kWasmCompiled:
- eng_c = 'C';
+ case ExecutionEngine::kTurbofan:
+ eng_c = 'T';
break;
- case kWasmInterpreted:
+ case ExecutionEngine::kLiftoff:
+ eng_c = 'L';
+ break;
+ case ExecutionEngine::kInterpreter:
eng_c = 'I';
break;
}
printf("%c %8d+0x%-6x %s @%08x %s\n", eng_c, func_index, position,
- is_store ? "store" : "read ", addr, value.start());
+ info->is_store ? "store" : "load ", info->address, value.start());
}
-} // namespace tracing
+} // namespace wasm
} // namespace internal
} // namespace v8
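For reference, with the format string above ("%c %8d+0x%-6x %s @%08x %s\n") a 16-bit load executed by the interpreter would print a trace line along these lines (all values hypothetical, spacing approximate):

I        5+0x2a     load  @00000010 i16:42 / 002a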
diff --git a/deps/v8/src/wasm/memory-tracing.h b/deps/v8/src/wasm/memory-tracing.h
index 7d7bc288c0..33170aefbe 100644
--- a/deps/v8/src/wasm/memory-tracing.h
+++ b/deps/v8/src/wasm/memory-tracing.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MEMORY_TRACING_H
-#define V8_MEMORY_TRACING_H
+#ifndef V8_WASM_MEMORY_TRACING_H_
+#define V8_WASM_MEMORY_TRACING_H_
#include <cstdint>
@@ -11,18 +11,31 @@
namespace v8 {
namespace internal {
-namespace tracing {
+namespace wasm {
-enum ExecutionEngine { kWasmCompiled, kWasmInterpreted };
+enum class ExecutionEngine { kTurbofan, kLiftoff, kInterpreter };
+
+// This struct is created in generated code, hence it uses low-level types.
+struct MemoryTracingInfo {
+ uint32_t address;
+ uint8_t is_store; // 0 or 1
+ uint8_t mem_rep;
+ static_assert(
+ std::is_same<decltype(mem_rep),
+ std::underlying_type<MachineRepresentation>::type>::value,
+ "MachineRepresentation uses uint8_t");
+
+ MemoryTracingInfo(uint32_t addr, bool is_store, MachineRepresentation rep)
+ : address(addr), is_store(is_store), mem_rep(static_cast<uint8_t>(rep)) {}
+};
// Callback for tracing a memory operation for debugging.
// Triggered by --wasm-trace-memory.
-void TraceMemoryOperation(ExecutionEngine, bool is_store, MachineRepresentation,
- uint32_t addr, int func_index, int position,
- uint8_t* mem_start);
+void TraceMemoryOperation(ExecutionEngine, const MemoryTracingInfo* info,
+ int func_index, int position, uint8_t* mem_start);
-} // namespace tracing
+} // namespace wasm
} // namespace internal
} // namespace v8
-#endif /* !V8_MEMORY_TRACING_H */
+#endif // V8_WASM_MEMORY_TRACING_H_
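Since TraceMemoryOperation now receives a MemoryTracingInfo pointer rather than individual arguments, a host-side call would look roughly like this sketch (the info struct is normally built by generated code; func_index, position and mem_start are assumed locals):

wasm::MemoryTracingInfo info(/*addr=*/16, /*is_store=*/false,
                             MachineRepresentation::kWord16);
wasm::TraceMemoryOperation(wasm::ExecutionEngine::kInterpreter, &info,
                           func_index, position, mem_start);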
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 4bd52a2a8f..4a2e610b99 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -19,8 +19,9 @@
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/compilation-manager.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-code-specialization.h"
-#include "src/wasm/wasm-heap.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -51,12 +52,12 @@
if (FLAG_trace_wasm_lazy_compilation) PrintF(__VA_ARGS__); \
} while (false)
-static const int kInvalidSigIndex = -1;
-
namespace v8 {
namespace internal {
namespace wasm {
+static constexpr int kInvalidSigIndex = -1;
+
// A class compiling an entire module.
class ModuleCompiler {
public:
@@ -103,7 +104,9 @@ class ModuleCompiler {
compiler_->counters()));
}
- void Commit() {
+ bool Commit() {
+ if (units_.empty()) return false;
+
{
base::LockGuard<base::Mutex> guard(
&compiler_->compilation_units_mutex_);
@@ -113,6 +116,7 @@ class ModuleCompiler {
std::make_move_iterator(units_.end()));
}
units_.clear();
+ return true;
}
void Clear() { units_.clear(); }
@@ -165,8 +169,13 @@ class ModuleCompiler {
bool CanAcceptWork() const { return executed_units_.CanAcceptWork(); }
- bool ShouldIncreaseWorkload() const {
- return executed_units_.ShouldIncreaseWorkload();
+ bool ShouldIncreaseWorkload() {
+ if (executed_units_.ShouldIncreaseWorkload()) {
+ // Check if it actually makes sense to increase the workload.
+ base::LockGuard<base::Mutex> guard(&compilation_units_mutex_);
+ return !compilation_units_.empty();
+ }
+ return false;
}
size_t InitializeCompilationUnits(const std::vector<WasmFunction>& functions,
@@ -241,7 +250,8 @@ class JSToWasmWrapperCache {
Handle<Code> CloneOrCompileJSToWasmWrapper(Isolate* isolate,
wasm::WasmModule* module,
WasmCodeWrapper wasm_code,
- uint32_t index) {
+ uint32_t index,
+ bool use_trap_handler) {
const wasm::WasmFunction* func = &module->functions[index];
int cached_idx = sig_map_.Find(func->sig);
if (cached_idx >= 0) {
@@ -263,22 +273,19 @@ class JSToWasmWrapperCache {
}
}
} else {
- for (RelocIterator it(*code,
- RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
- ; it.next()) {
- DCHECK(!it.done());
- it.rinfo()->set_js_to_wasm_address(
- isolate, wasm_code.is_null()
- ? nullptr
- : wasm_code.GetWasmCode()->instructions().start());
- break;
- }
+ RelocIterator it(*code,
+ RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
+ DCHECK(!it.done());
+ it.rinfo()->set_js_to_wasm_address(
+ isolate, wasm_code.is_null()
+ ? nullptr
+ : wasm_code.GetWasmCode()->instructions().start());
}
return code;
}
Handle<Code> code = compiler::CompileJSToWasmWrapper(
- isolate, module, wasm_code, index, context_address_);
+ isolate, module, wasm_code, index, context_address_, use_trap_handler);
uint32_t new_cache_idx = sig_map_.FindOrInsert(func->sig);
DCHECK_EQ(code_cache_.size(), new_cache_idx);
USE(new_cache_idx);
@@ -312,8 +319,7 @@ class InstanceBuilder {
struct TableInstance {
Handle<WasmTableObject> table_object; // WebAssembly.Table instance
Handle<FixedArray> js_wrappers; // JSFunctions exported
- Handle<FixedArray> function_table; // internal code array
- Handle<FixedArray> signature_table; // internal sig array
+ Handle<FixedArray> function_table; // internal array of <sig,code> pairs
};
// A pre-evaluated value to use in import binding.
@@ -343,6 +349,8 @@ class InstanceBuilder {
}
Counters* counters() const { return async_counters().get(); }
+ bool use_trap_handler() const { return compiled_module_->use_trap_handler(); }
+
// Helper routines to print out errors with imports.
#define ERROR_THROWER_WITH_MESSAGE(TYPE) \
void Report##TYPE(const char* error, uint32_t index, \
@@ -435,12 +443,13 @@ void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
} else {
TRACE("Finalizing %d {\n", compiled_module->instance_id());
- if (trap_handler::UseTrapHandler()) {
+ if (compiled_module->use_trap_handler()) {
// TODO(6792): No longer needed once WebAssembly code is off heap.
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
- Handle<FixedArray> code_table = compiled_module->code_table();
+ DisallowHeapAllocation no_gc;
+ FixedArray* code_table = compiled_module->code_table();
for (int i = 0; i < code_table->length(); ++i) {
- Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
+ Code* code = Code::cast(code_table->get(i));
int index = code->trap_handler_index()->value();
if (index >= 0) {
trap_handler::ReleaseHandlerData(index);
@@ -450,7 +459,7 @@ void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
}
}
}
- WeakCell* weak_wasm_module = compiled_module->ptr_to_weak_wasm_module();
+ WeakCell* weak_wasm_module = compiled_module->weak_wasm_module();
// Since the order of finalizers is not guaranteed, it can be the case
// that {instance->compiled_module()->module()}, which is a
@@ -483,7 +492,7 @@ void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
WasmCompiledModule::Reset(isolate, compiled_module);
} else {
WasmModuleObject::cast(wasm_module)
- ->set_compiled_module(compiled_module->ptr_to_next_instance());
+ ->set_compiled_module(compiled_module->next_instance());
}
}
}
@@ -500,14 +509,29 @@ void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
TRACE("}\n");
}
-} // namespace
+// This is used in ProcessImports.
+// When importing other modules' exports, we need to ask
+// the exporter for a WasmToWasm wrapper. To do that, we need to
+// switch that module to RW. To avoid flip-flopping the same module
+// RW <-> RX, we create a scope for a set of NativeModules.
+class SetOfNativeModuleModificationScopes final {
+ public:
+ void Add(NativeModule* module) {
+ module->SetExecutable(false);
+ native_modules_.insert(module);
+ }
-bool SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes) {
- if (bytes.start() == nullptr || bytes.length() == 0) return false;
- ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
- bytes.end(), true, kWasmOrigin);
- return result.ok();
-}
+ ~SetOfNativeModuleModificationScopes() {
+ for (NativeModule* module : native_modules_) {
+ module->SetExecutable(true);
+ }
+ }
+
+ private:
+ std::unordered_set<NativeModule*> native_modules_;
+};
+
+} // namespace
MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
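The SetOfNativeModuleModificationScopes helper introduced above is meant to be used as in the following sketch, so exporting modules stay writable while all wrappers are created and are switched back to executable exactly once when the set goes out of scope (module names are illustrative):

{
  SetOfNativeModuleModificationScopes scopes;
  scopes.Add(exporting_module);  // SetExecutable(false); tracked for restore
  // ... create and install wasm-to-wasm wrappers ...
}  // destructor calls SetExecutable(true) once per tracked module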
@@ -530,13 +554,8 @@ MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
MaybeHandle<WasmModuleObject> SyncCompile(Isolate* isolate,
ErrorThrower* thrower,
const ModuleWireBytes& bytes) {
- // TODO(titzer): only make a copy of the bytes if SharedArrayBuffer
- std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
- memcpy(copy.get(), bytes.start(), bytes.length());
- ModuleWireBytes bytes_copy(copy.get(), copy.get() + bytes.length());
-
- ModuleResult result = SyncDecodeWasmModule(
- isolate, bytes_copy.start(), bytes_copy.end(), false, kWasmOrigin);
+ ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
+ bytes.end(), false, kWasmOrigin);
if (result.failed()) {
thrower->CompileFailed("Wasm decoding failed", result);
return {};
@@ -545,7 +564,7 @@ MaybeHandle<WasmModuleObject> SyncCompile(Isolate* isolate,
// Transfer ownership of the WasmModule to the {WasmModuleWrapper} generated
// in {CompileToModuleObject}.
return ModuleCompiler::CompileToModuleObject(
- isolate, thrower, std::move(result.val), bytes_copy, Handle<Script>(),
+ isolate, thrower, std::move(result.val), bytes, Handle<Script>(),
Vector<const byte>());
}
@@ -602,12 +621,22 @@ void AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
}
void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
- const ModuleWireBytes& bytes) {
+ const ModuleWireBytes& bytes, bool is_shared) {
if (!FLAG_wasm_async_compilation) {
+ // Asynchronous compilation disabled; fall back on synchronous compilation.
ErrorThrower thrower(isolate, "WasmCompile");
- // Compile the module.
- MaybeHandle<WasmModuleObject> module_object =
- SyncCompile(isolate, &thrower, bytes);
+ MaybeHandle<WasmModuleObject> module_object;
+ if (is_shared) {
+ // Make a copy of the wire bytes to avoid concurrent modification.
+ std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
+ memcpy(copy.get(), bytes.start(), bytes.length());
+ i::wasm::ModuleWireBytes bytes_copy(copy.get(),
+ copy.get() + bytes.length());
+ module_object = SyncCompile(isolate, &thrower, bytes_copy);
+ } else {
+ // The wire bytes are not shared, OK to use them directly.
+ module_object = SyncCompile(isolate, &thrower, bytes);
+ }
if (thrower.error()) {
RejectPromise(isolate, handle(isolate->context()), thrower, promise);
return;
@@ -619,8 +648,10 @@ void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
if (FLAG_wasm_test_streaming) {
std::shared_ptr<StreamingDecoder> streaming_decoder =
- isolate->wasm_compilation_manager()->StartStreamingCompilation(
- isolate, handle(isolate->context()), promise);
+ isolate->wasm_engine()
+ ->compilation_manager()
+ ->StartStreamingCompilation(isolate, handle(isolate->context()),
+ promise);
streaming_decoder->OnBytesReceived(bytes.module_bytes());
streaming_decoder->Finish();
return;
@@ -629,7 +660,7 @@ void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
// during asynchronous compilation.
std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
memcpy(copy.get(), bytes.start(), bytes.length());
- isolate->wasm_compilation_manager()->StartAsyncCompileJob(
+ isolate->wasm_engine()->compilation_manager()->StartAsyncCompileJob(
isolate, std::move(copy), bytes.length(), handle(isolate->context()),
promise);
}
@@ -692,6 +723,8 @@ Handle<Code> CompileLazyOnGCHeap(Isolate* isolate) {
->shared()
->lazy_compilation_orchestrator())
->get();
+ DCHECK(!orchestrator->IsFrozenForTesting());
+
Handle<Code> compiled_code = orchestrator->CompileLazyOnGCHeap(
isolate, instance, caller_code, offset, func_index, patch_caller);
if (!exp_deopt_data.is_null() && exp_deopt_data->length() > 2) {
@@ -705,8 +738,9 @@ Handle<Code> CompileLazyOnGCHeap(Isolate* isolate) {
if (exp_deopt_data->get(idx)->IsUndefined(isolate)) break;
FixedArray* exp_table = FixedArray::cast(exp_deopt_data->get(idx));
int exp_index = Smi::ToInt(exp_deopt_data->get(idx + 1));
- DCHECK(exp_table->get(exp_index) == *lazy_compile_code);
- exp_table->set(exp_index, *compiled_code);
+ int table_index = compiler::FunctionTableCodeOffset(exp_index);
+ DCHECK(exp_table->get(table_index) == *lazy_compile_code);
+ exp_table->set(table_index, *compiled_code);
}
// TODO(6792): No longer needed once WebAssembly code is off heap.
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
@@ -737,11 +771,13 @@ Address CompileLazy(Isolate* isolate) {
Maybe<uint32_t> func_index_to_compile = Nothing<uint32_t>();
Handle<Object> exp_deopt_data_entry;
const wasm::WasmCode* lazy_stub_or_copy =
- isolate->wasm_code_manager()->LookupCode(it.frame()->pc());
- DCHECK_EQ(wasm::WasmCode::LazyStub, lazy_stub_or_copy->kind());
+ isolate->wasm_engine()->code_manager()->LookupCode(it.frame()->pc());
+ DCHECK_EQ(wasm::WasmCode::kLazyStub, lazy_stub_or_copy->kind());
if (!lazy_stub_or_copy->IsAnonymous()) {
// Then it's an indirect call or via JS->wasm wrapper.
- instance = lazy_stub_or_copy->owner()->compiled_module()->owning_instance();
+ instance =
+ handle(lazy_stub_or_copy->owner()->compiled_module()->owning_instance(),
+ isolate);
func_index_to_compile = Just(lazy_stub_or_copy->index());
exp_deopt_data_entry =
handle(instance->compiled_module()->lazy_compile_data()->get(
@@ -761,15 +797,16 @@ Address CompileLazy(Isolate* isolate) {
js_to_wasm_caller_code = handle(it.frame()->LookupCode(), isolate);
} else {
wasm_caller_code =
- isolate->wasm_code_manager()->LookupCode(it.frame()->pc());
+ isolate->wasm_engine()->code_manager()->LookupCode(it.frame()->pc());
offset = Just(static_cast<uint32_t>(
it.frame()->pc() - wasm_caller_code->instructions().start()));
if (instance.is_null()) {
// Then this is a direct call (otherwise we would have attached the
// instance via deopt data to the lazy compile stub). Just use the
// instance of the caller.
- instance =
- wasm_caller_code->owner()->compiled_module()->owning_instance();
+ instance = handle(
+ wasm_caller_code->owner()->compiled_module()->owning_instance(),
+ isolate);
}
}
@@ -779,6 +816,11 @@ Address CompileLazy(Isolate* isolate) {
Managed<wasm::LazyCompilationOrchestrator>::cast(
compiled_module->shared()->lazy_compilation_orchestrator())
->get();
+ DCHECK(!orchestrator->IsFrozenForTesting());
+
+ NativeModuleModificationScope native_module_modification_scope(
+ compiled_module->GetNativeModule());
+
const wasm::WasmCode* result = nullptr;
// The caller may be js to wasm calling a function
// also available for indirect calls.
@@ -812,13 +854,15 @@ Address CompileLazy(Isolate* isolate) {
// See EnsureExportedLazyDeoptData: exp_deopt_data[0...(len-1)] are pairs
// of <export_table, index> followed by undefined values. Use this
// information here to patch all export tables.
+ Handle<Foreign> foreign_holder =
+ isolate->factory()->NewForeign(result->instructions().start(), TENURED);
for (int idx = 0, end = exp_deopt_data->length(); idx < end; idx += 2) {
if (exp_deopt_data->get(idx)->IsUndefined(isolate)) break;
- FixedArray* exp_table = FixedArray::cast(exp_deopt_data->get(idx));
+ DisallowHeapAllocation no_gc;
int exp_index = Smi::ToInt(exp_deopt_data->get(idx + 1));
- Handle<Foreign> foreign_holder = isolate->factory()->NewForeign(
- result->instructions().start(), TENURED);
- exp_table->set(exp_index, *foreign_holder);
+ FixedArray* exp_table = FixedArray::cast(exp_deopt_data->get(idx));
+ exp_table->set(compiler::FunctionTableCodeOffset(exp_index),
+ *foreign_holder);
}
// TODO(6792): No longer needed once WebAssembly code is off heap.
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
@@ -834,42 +878,31 @@ Address CompileLazy(Isolate* isolate) {
compiler::ModuleEnv CreateModuleEnvFromCompiledModule(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
DisallowHeapAllocation no_gc;
- WasmModule* module = compiled_module->module();
- std::vector<Handle<Code>> empty_code;
+ WasmModule* module = compiled_module->shared()->module();
if (FLAG_wasm_jit_to_native) {
NativeModule* native_module = compiled_module->GetNativeModule();
- std::vector<GlobalHandleAddress> function_tables =
- native_module->function_tables();
- std::vector<GlobalHandleAddress> signature_tables =
- native_module->signature_tables();
-
- compiler::ModuleEnv result = {module, // --
- function_tables, // --
- signature_tables, // --
- empty_code,
- BUILTIN_CODE(isolate, WasmCompileLazy)};
+ compiler::ModuleEnv result(module, native_module->function_tables(),
+ std::vector<Handle<Code>>{},
+ BUILTIN_CODE(isolate, WasmCompileLazy),
+ compiled_module->use_trap_handler());
return result;
- } else {
- std::vector<GlobalHandleAddress> function_tables;
- std::vector<GlobalHandleAddress> signature_tables;
-
- int num_function_tables = static_cast<int>(module->function_tables.size());
- for (int i = 0; i < num_function_tables; ++i) {
- FixedArray* ft = compiled_module->ptr_to_function_tables();
- FixedArray* st = compiled_module->ptr_to_signature_tables();
+ }
- // TODO(clemensh): defer these handles for concurrent compilation.
- function_tables.push_back(WasmCompiledModule::GetTableValue(ft, i));
- signature_tables.push_back(WasmCompiledModule::GetTableValue(st, i));
- }
+ std::vector<GlobalHandleAddress> function_tables;
- compiler::ModuleEnv result = {module, // --
- function_tables, // --
- signature_tables, // --
- empty_code, // --
- BUILTIN_CODE(isolate, WasmCompileLazy)};
- return result;
+ int num_function_tables = static_cast<int>(module->function_tables.size());
+ FixedArray* ft =
+ num_function_tables == 0 ? nullptr : compiled_module->function_tables();
+ for (int i = 0; i < num_function_tables; ++i) {
+ // TODO(clemensh): defer these handles for concurrent compilation.
+ function_tables.push_back(WasmCompiledModule::GetTableValue(ft, i));
}
+
+ compiler::ModuleEnv result(module, std::move(function_tables),
+ std::vector<Handle<Code>>{},
+ BUILTIN_CODE(isolate, WasmCompileLazy),
+ compiled_module->use_trap_handler());
+ return result;
}
const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
@@ -882,7 +915,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
wasm::WasmCode* existing_code = compiled_module->GetNativeModule()->GetCode(
static_cast<uint32_t>(func_index));
if (existing_code != nullptr &&
- existing_code->kind() == wasm::WasmCode::Function) {
+ existing_code->kind() == wasm::WasmCode::kFunction) {
TRACE_LAZY("Function %d already compiled.\n", func_index);
return existing_code;
}
@@ -897,7 +930,8 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
compiler::ModuleEnv module_env =
CreateModuleEnvFromCompiledModule(isolate, compiled_module);
- const uint8_t* module_start = compiled_module->module_bytes()->GetChars();
+ const uint8_t* module_start =
+ compiled_module->shared()->module_bytes()->GetChars();
const WasmFunction* func = &module_env.module->functions[func_index];
FunctionBody body{func->sig, func->code.offset(),
@@ -908,7 +942,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
std::string func_name;
{
WasmName name = Vector<const char>::cast(
- compiled_module->GetRawFunctionName(func_index));
+ compiled_module->shared()->GetRawFunctionName(func_index));
// Copy to std::string, because the underlying string object might move on
// the heap.
func_name.assign(name.start(), static_cast<size_t>(name.length()));
@@ -1000,6 +1034,40 @@ Code* ExtractWasmToWasmCallee(Code* wasm_to_wasm) {
return callee;
}
+const WasmCode* WasmExtractWasmToWasmCallee(const WasmCodeManager* code_manager,
+ const WasmCode* wasm_to_wasm) {
+ DCHECK_EQ(WasmCode::kWasmToWasmWrapper, wasm_to_wasm->kind());
+ // Find the one code target in this wrapper.
+ RelocIterator it(wasm_to_wasm->instructions(), wasm_to_wasm->reloc_info(),
+ wasm_to_wasm->constant_pool(),
+ RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
+ DCHECK(!it.done());
+ const WasmCode* callee =
+ code_manager->LookupCode(it.rinfo()->js_to_wasm_address());
+#ifdef DEBUG
+ it.next();
+ DCHECK(it.done());
+#endif
+ return callee;
+}
+
+// TODO(mtrofin): this should be a function again when chromium:761307
+// is addressed. chromium:771171 is also related.
+#define WasmPatchWasmToWasmWrapper(isolate, wasm_to_wasm, new_target) \
+ do { \
+ TRACE_LAZY("Patching wasm-to-wasm wrapper.\n"); \
+ DCHECK_EQ(WasmCode::kWasmToWasmWrapper, wasm_to_wasm->kind()); \
+ NativeModuleModificationScope scope(wasm_to_wasm->owner()); \
+ RelocIterator it(wasm_to_wasm->instructions(), wasm_to_wasm->reloc_info(), \
+ wasm_to_wasm->constant_pool(), \
+ RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL)); \
+ DCHECK(!it.done()); \
+ it.rinfo()->set_js_to_wasm_address(isolate, \
+ new_target->instructions().start()); \
+ it.next(); \
+ DCHECK(it.done()); \
+ } while (0)
+
void PatchWasmToWasmWrapper(Isolate* isolate, Code* wasm_to_wasm,
Code* new_target) {
DCHECK_EQ(Code::WASM_TO_WASM_FUNCTION, wasm_to_wasm->kind());
@@ -1051,9 +1119,10 @@ Handle<Code> LazyCompilationOrchestrator::CompileLazyOnGCHeap(
Handle<WasmCompiledModule> caller_module(
caller_func_info.instance.ToHandleChecked()->compiled_module(),
isolate);
- SeqOneByteString* module_bytes = caller_module->module_bytes();
+ SeqOneByteString* module_bytes = caller_module->shared()->module_bytes();
const byte* func_bytes =
- module_bytes->GetChars() + caller_module->module()
+ module_bytes->GetChars() + caller_module->shared()
+ ->module()
->functions[caller_func_info.func_index]
.code.offset();
Code* lazy_callee = nullptr;
@@ -1182,24 +1251,32 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFromJsToWasm(
CompileFunction(isolate, instance, exported_func_index);
{
DisallowHeapAllocation no_gc;
- int idx = 0;
- for (RelocIterator it(*js_to_wasm_caller,
- RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
- !it.done(); it.next()) {
- ++idx;
- const wasm::WasmCode* callee_compiled =
- compiled_module->GetNativeModule()->GetCode(exported_func_index);
- DCHECK_NOT_NULL(callee_compiled);
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ RelocIterator it(*js_to_wasm_caller,
+ RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
+ DCHECK(!it.done());
+ wasm::WasmCode* current_callee =
+ isolate->wasm_engine()->code_manager()->LookupCode(
+ it.rinfo()->js_to_wasm_address());
+ const wasm::WasmCode* callee_compiled =
+ compiled_module->GetNativeModule()->GetCode(exported_func_index);
+ DCHECK_NOT_NULL(callee_compiled);
+ if (current_callee->kind() == WasmCode::kWasmToWasmWrapper) {
+ WasmPatchWasmToWasmWrapper(isolate, current_callee, callee_compiled);
+ } else {
it.rinfo()->set_js_to_wasm_address(
isolate, callee_compiled->instructions().start());
}
- DCHECK_EQ(1, idx);
+#ifdef DEBUG
+ it.next();
+ DCHECK(it.done());
+#endif
}
wasm::WasmCode* ret =
compiled_module->GetNativeModule()->GetCode(exported_func_index);
DCHECK_NOT_NULL(ret);
- DCHECK_EQ(wasm::WasmCode::Function, ret->kind());
+ DCHECK_EQ(wasm::WasmCode::kFunction, ret->kind());
return ret;
}
@@ -1217,36 +1294,30 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
Isolate* isolate, Handle<WasmInstanceObject> instance,
Maybe<uint32_t> maybe_func_to_return_idx, const wasm::WasmCode* wasm_caller,
int call_offset) {
- struct WasmDirectCallData {
- uint32_t offset = 0;
- uint32_t func_index = 0;
- };
- std::vector<Maybe<WasmDirectCallData>> non_compiled_functions;
+ std::vector<Maybe<uint32_t>> non_compiled_functions;
Decoder decoder(nullptr, nullptr);
+ WasmCode* last_callee = nullptr;
+
{
DisallowHeapAllocation no_gc;
Handle<WasmCompiledModule> caller_module(
wasm_caller->owner()->compiled_module(), isolate);
- SeqOneByteString* module_bytes = caller_module->module_bytes();
+ SeqOneByteString* module_bytes = caller_module->shared()->module_bytes();
uint32_t caller_func_index = wasm_caller->index();
SourcePositionTableIterator source_pos_iterator(
Handle<ByteArray>(ByteArray::cast(
caller_module->source_positions()->get(caller_func_index))));
const byte* func_bytes =
- module_bytes->GetChars() +
- caller_module->module()->functions[caller_func_index].code.offset();
+ module_bytes->GetChars() + caller_module->shared()
+ ->module()
+ ->functions[caller_func_index]
+ .code.offset();
for (RelocIterator it(wasm_caller->instructions(),
wasm_caller->reloc_info(),
wasm_caller->constant_pool(),
RelocInfo::ModeMask(RelocInfo::WASM_CALL));
!it.done(); it.next()) {
- const WasmCode* callee = isolate->wasm_code_manager()->LookupCode(
- it.rinfo()->target_address());
- if (callee->kind() != WasmCode::LazyStub) {
- non_compiled_functions.push_back(Nothing<WasmDirectCallData>());
- continue;
- }
// TODO(clemensh): Introduce safe_cast<T, bool> which (D)CHECKS
// (depending on the bool) against limits of T and then static_casts.
size_t offset_l = it.rinfo()->pc() - wasm_caller->instructions().start();
@@ -1254,14 +1325,19 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
int offset = static_cast<int>(offset_l);
int byte_pos =
AdvanceSourcePositionTableIterator(source_pos_iterator, offset);
+
+ WasmCode* callee = isolate->wasm_engine()->code_manager()->LookupCode(
+ it.rinfo()->target_address());
+ if (offset < call_offset) last_callee = callee;
+ if (callee->kind() != WasmCode::kLazyStub) {
+ non_compiled_functions.push_back(Nothing<uint32_t>());
+ continue;
+ }
uint32_t called_func_index =
ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
DCHECK_LT(called_func_index,
caller_module->GetNativeModule()->FunctionCount());
- WasmDirectCallData data;
- data.offset = offset;
- data.func_index = called_func_index;
- non_compiled_functions.push_back(Just<WasmDirectCallData>(data));
+ non_compiled_functions.push_back(Just(called_func_index));
// Call offset one instruction after the call. Remember the last called
// function before that offset.
if (offset < call_offset) {
@@ -1269,7 +1345,15 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
}
}
}
- uint32_t func_to_return_idx = maybe_func_to_return_idx.ToChecked();
+ uint32_t func_to_return_idx = 0;
+
+ if (last_callee->kind() == WasmCode::kWasmToWasmWrapper) {
+ const WasmCode* actual_callee = WasmExtractWasmToWasmCallee(
+ isolate->wasm_engine()->code_manager(), last_callee);
+ func_to_return_idx = actual_callee->index();
+ } else {
+ func_to_return_idx = maybe_func_to_return_idx.ToChecked();
+ }
TRACE_LAZY(
"Starting lazy compilation (func %u @%d, js_to_wasm: false, patch "
@@ -1278,15 +1362,16 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
// TODO(clemensh): compile all functions in non_compiled_functions in
// background, wait for func_to_return_idx.
- CompileFunction(isolate, instance, func_to_return_idx);
-
- Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
- isolate);
- WasmCode* ret =
- compiled_module->GetNativeModule()->GetCode(func_to_return_idx);
-
+ const WasmCode* ret = CompileFunction(isolate, instance, func_to_return_idx);
DCHECK_NOT_NULL(ret);
- {
+
+ if (last_callee->kind() == WasmCode::kWasmToWasmWrapper) {
+ // We can finish it all here by compiling the target wasm function and
+ // patching the wasm_to_wasm caller.
+ WasmPatchWasmToWasmWrapper(isolate, last_callee, ret);
+ } else {
+ Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
+ isolate);
DisallowHeapAllocation no_gc;
// Now patch the code object with all functions which are now compiled. This
// will pick up any other compiled functions, not only {ret}.
@@ -1299,10 +1384,10 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
!it.done(); it.next(), ++idx) {
auto& info = non_compiled_functions[idx];
if (info.IsNothing()) continue;
- uint32_t lookup = info.ToChecked().func_index;
+ uint32_t lookup = info.ToChecked();
const WasmCode* callee_compiled =
compiled_module->GetNativeModule()->GetCode(lookup);
- if (callee_compiled->kind() != WasmCode::Function) continue;
+ if (callee_compiled->kind() != WasmCode::kFunction) continue;
it.rinfo()->set_wasm_call_address(
isolate, callee_compiled->instructions().start());
++patched;
@@ -1721,7 +1806,7 @@ WasmCodeWrapper EnsureExportedLazyDeoptData(Isolate* isolate,
} else {
wasm::WasmCode* code = native_module->GetCode(func_index);
// {code} will be nullptr when exporting imports.
- if (code == nullptr || code->kind() != wasm::WasmCode::LazyStub ||
+ if (code == nullptr || code->kind() != wasm::WasmCode::kLazyStub ||
!code->IsAnonymous()) {
return WasmCodeWrapper(code);
}
@@ -1787,7 +1872,7 @@ WasmCodeWrapper EnsureTableExportLazyDeoptData(
EnsureExportedLazyDeoptData(isolate, instance, code_table,
native_module, func_index)
.GetWasmCode();
- if (code == nullptr || code->kind() != wasm::WasmCode::LazyStub)
+ if (code == nullptr || code->kind() != wasm::WasmCode::kLazyStub)
return WasmCodeWrapper(code);
// deopt_data:
@@ -1866,7 +1951,7 @@ WasmCodeWrapper MakeWasmToWasmWrapper(
new_wasm_context_address);
return WasmCodeWrapper(
instance->compiled_module()->GetNativeModule()->AddCodeCopy(
- code, wasm::WasmCode::WasmToWasmWrapper, index));
+ code, wasm::WasmCode::kWasmToWasmWrapper, index));
}
}
@@ -1885,13 +1970,15 @@ WasmCodeWrapper UnwrapExportOrCompileImportWrapper(
// signature.
if (FLAG_wasm_jit_to_native) {
Handle<Code> temp_code = compiler::CompileWasmToJSWrapper(
- isolate, target, sig, import_index, origin, js_imports_table);
+ isolate, target, sig, import_index, origin,
+ instance->compiled_module()->use_trap_handler(), js_imports_table);
return WasmCodeWrapper(
instance->compiled_module()->GetNativeModule()->AddCodeCopy(
- temp_code, wasm::WasmCode::WasmToJsWrapper, import_index));
+ temp_code, wasm::WasmCode::kWasmToJsWrapper, import_index));
} else {
return WasmCodeWrapper(compiler::CompileWasmToJSWrapper(
- isolate, target, sig, import_index, origin, js_imports_table));
+ isolate, target, sig, import_index, origin,
+ instance->compiled_module()->use_trap_handler(), js_imports_table));
}
}
@@ -1908,33 +1995,21 @@ void FunctionTableFinalizer(const v8::WeakCallbackInfo<void>& data) {
std::unique_ptr<compiler::ModuleEnv> CreateDefaultModuleEnv(
Isolate* isolate, WasmModule* module, Handle<Code> illegal_builtin) {
std::vector<GlobalHandleAddress> function_tables;
- std::vector<GlobalHandleAddress> signature_tables;
- for (size_t i = 0; i < module->function_tables.size(); i++) {
+ for (size_t i = module->function_tables.size(); i > 0; --i) {
Handle<Object> func_table =
isolate->global_handles()->Create(isolate->heap()->undefined_value());
- Handle<Object> sig_table =
- isolate->global_handles()->Create(isolate->heap()->undefined_value());
GlobalHandles::MakeWeak(func_table.location(), func_table.location(),
&FunctionTableFinalizer,
v8::WeakCallbackType::kFinalizer);
- GlobalHandles::MakeWeak(sig_table.location(), sig_table.location(),
- &FunctionTableFinalizer,
- v8::WeakCallbackType::kFinalizer);
function_tables.push_back(func_table.address());
- signature_tables.push_back(sig_table.address());
}
- std::vector<Handle<Code>> empty_code;
-
- compiler::ModuleEnv result = {
- module, // --
- function_tables, // --
- signature_tables, // --
- empty_code, // --
- illegal_builtin // --
- };
- return std::unique_ptr<compiler::ModuleEnv>(new compiler::ModuleEnv(result));
+ // TODO(kschimpf): Add module-specific policy handling here (see v8:7143)?
+ bool use_trap_handler = trap_handler::IsTrapHandlerEnabled();
+ return base::make_unique<compiler::ModuleEnv>(
+ module, function_tables, std::vector<Handle<Code>>{}, illegal_builtin,
+ use_trap_handler);
}
// TODO(mtrofin): remove code_table when we don't need FLAG_wasm_jit_to_native
@@ -1945,7 +2020,7 @@ Handle<WasmCompiledModule> NewCompiledModule(Isolate* isolate,
compiler::ModuleEnv* env) {
Handle<WasmCompiledModule> compiled_module =
WasmCompiledModule::New(isolate, module, code_table, export_wrappers,
- env->function_tables, env->signature_tables);
+ env->function_tables, env->use_trap_handler);
return compiled_module;
}
@@ -2047,8 +2122,9 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
native_module_ = compiled_module->GetNativeModule();
compiled_module->OnWasmModuleDecodingComplete(shared);
if (lazy_compile && FLAG_wasm_jit_to_native) {
- compiled_module->set_lazy_compile_data(isolate_->factory()->NewFixedArray(
- static_cast<int>(module_->functions.size()), TENURED));
+ Handle<FixedArray> lazy_compile_data = isolate_->factory()->NewFixedArray(
+ static_cast<int>(module_->functions.size()), TENURED);
+ compiled_module->set_lazy_compile_data(*lazy_compile_data);
}
if (!lazy_compile) {
@@ -2122,7 +2198,7 @@ InstanceBuilder::InstanceBuilder(
MaybeHandle<JSArrayBuffer> memory,
WeakCallbackInfo<void>::Callback instance_finalizer_callback)
: isolate_(isolate),
- module_(module_object->compiled_module()->module()),
+ module_(module_object->compiled_module()->shared()->module()),
async_counters_(isolate->async_counters()),
thrower_(thrower),
module_object_(module_object),
@@ -2205,13 +2281,13 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
TRACE("Cloning from %zu\n", original->GetNativeModule()->instance_id);
compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
native_module = compiled_module_->GetNativeModule();
- wrapper_table = compiled_module_->export_wrappers();
+ wrapper_table = handle(compiled_module_->export_wrappers(), isolate_);
} else {
TRACE("Cloning from %d\n", original->instance_id());
- old_code_table = original->code_table();
+ old_code_table = handle(original->code_table(), isolate_);
compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
- code_table = compiled_module_->code_table();
- wrapper_table = compiled_module_->export_wrappers();
+ code_table = handle(compiled_module_->code_table(), isolate_);
+ wrapper_table = handle(compiled_module_->export_wrappers(), isolate_);
// Avoid creating too many handles in the outer scope.
HandleScope scope(isolate_);
@@ -2261,21 +2337,27 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
} else {
// There was no owner, so we can reuse the original.
compiled_module_ = original;
- wrapper_table = compiled_module_->export_wrappers();
+ wrapper_table = handle(compiled_module_->export_wrappers(), isolate_);
if (FLAG_wasm_jit_to_native) {
old_module = compiled_module_->GetNativeModule();
native_module = old_module;
TRACE("Reusing existing instance %zu\n",
compiled_module_->GetNativeModule()->instance_id);
} else {
- old_code_table =
- factory->CopyFixedArray(compiled_module_->code_table());
- code_table = compiled_module_->code_table();
+ code_table = handle(compiled_module_->code_table(), isolate_);
+ old_code_table = factory->CopyFixedArray(code_table);
TRACE("Reusing existing instance %d\n",
compiled_module_->instance_id());
}
}
- compiled_module_->set_native_context(isolate_->native_context());
+ Handle<WeakCell> weak_native_context =
+ isolate_->factory()->NewWeakCell(isolate_->native_context());
+ compiled_module_->set_weak_native_context(*weak_native_context);
+ }
+ base::Optional<wasm::NativeModuleModificationScope>
+ native_module_modification_scope;
+ if (native_module != nullptr) {
+ native_module_modification_scope.emplace(native_module);
}
//--------------------------------------------------------------------------
@@ -2312,9 +2394,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
int function_table_count = static_cast<int>(module_->function_tables.size());
table_instances_.reserve(module_->function_tables.size());
for (int index = 0; index < function_table_count; ++index) {
- table_instances_.push_back(
- {Handle<WasmTableObject>::null(), Handle<FixedArray>::null(),
- Handle<FixedArray>::null(), Handle<FixedArray>::null()});
+ table_instances_.emplace_back();
}
//--------------------------------------------------------------------------
@@ -2348,7 +2428,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
memory->set_is_neuterable(false);
- DCHECK_IMPLIES(trap_handler::UseTrapHandler(),
+ DCHECK_IMPLIES(use_trap_handler(),
module_->is_asm_js() || memory->has_guard_region());
} else if (initial_pages > 0) {
// Allocate memory if the initial size is more than 0 pages.
@@ -2389,7 +2469,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
DCHECK(table_init.table_index < table_instances_.size());
uint32_t base = EvalUint32InitExpr(table_init.offset);
uint32_t table_size =
- table_instances_[table_init.table_index].function_table->length();
+ table_instances_[table_init.table_index].function_table->length() /
+ compiler::kFunctionTableEntrySize;
if (!in_bounds(base, static_cast<uint32_t>(table_init.entries.size()),
table_size)) {
thrower_->LinkError("table initializer is out of bounds");
@@ -2477,7 +2558,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Unpack and notify signal handler of protected instructions.
//--------------------------------------------------------------------------
- if (trap_handler::UseTrapHandler()) {
+ if (use_trap_handler()) {
if (FLAG_wasm_jit_to_native) {
UnpackAndRegisterProtectedInstructions(isolate_, native_module);
} else {
@@ -2498,7 +2579,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
compiled_module_->InsertInChain(*module_object_);
}
module_object_->set_compiled_module(*compiled_module_);
- compiled_module_->set_weak_owning_instance(link_to_owning_instance);
+ compiled_module_->set_weak_owning_instance(*link_to_owning_instance);
GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
instance_finalizer_callback_,
v8::WeakCallbackType::kFinalizer);
@@ -2508,8 +2589,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// Debugging support.
//--------------------------------------------------------------------------
// Set all breakpoints that were set on the shared module.
- WasmSharedModuleData::SetBreakpointsOnNewInstance(compiled_module_->shared(),
- instance);
+ WasmSharedModuleData::SetBreakpointsOnNewInstance(
+ handle(compiled_module_->shared(), isolate_), instance);
if (FLAG_wasm_interpret_all && module_->is_wasm()) {
Handle<WasmDebugInfo> debug_info =
@@ -2535,15 +2616,17 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
isolate_, instance, code_table, native_module, start_index);
FunctionSig* sig = module_->functions[start_index].sig;
Handle<Code> wrapper_code = js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
- isolate_, module_, startup_code, start_index);
+ isolate_, module_, startup_code, start_index,
+ compiled_module_->use_trap_handler());
Handle<WasmExportedFunction> startup_fct = WasmExportedFunction::New(
isolate_, instance, MaybeHandle<String>(), start_index,
static_cast<int>(sig->parameter_count()), wrapper_code);
RecordStats(startup_code, counters());
// Call the JS function.
Handle<Object> undefined = factory->undefined_value();
- // Close the CodeSpaceMemoryModificationScope to execute the start function.
+ // Close the modification scopes, so we can execute the start function.
modification_scope.reset();
+ native_module_modification_scope.reset();
{
// We're OK with JS execution here. The instance is fully setup.
AllowJavascriptExecution allow_js(isolate_);
@@ -2658,8 +2741,8 @@ uint32_t InstanceBuilder::EvalUint32InitExpr(const WasmInitExpr& expr) {
// Load data segments into the memory.
void InstanceBuilder::LoadDataSegments(WasmContext* wasm_context) {
- Handle<SeqOneByteString> module_bytes(compiled_module_->module_bytes(),
- isolate_);
+ Handle<SeqOneByteString> module_bytes(
+ compiled_module_->shared()->module_bytes(), isolate_);
for (const WasmDataSegment& segment : module_->data_segments) {
uint32_t source_size = segment.source.length();
// Segments of size == 0 are just nops.
@@ -2700,13 +2783,13 @@ void InstanceBuilder::WriteGlobalValue(WasmGlobal& global,
void InstanceBuilder::SanitizeImports() {
Handle<SeqOneByteString> module_bytes(
- module_object_->compiled_module()->module_bytes());
+ module_object_->compiled_module()->shared()->module_bytes());
for (size_t index = 0; index < module_->import_table.size(); ++index) {
WasmImport& import = module_->import_table[index];
Handle<String> module_name;
MaybeHandle<String> maybe_module_name =
- WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
isolate_, module_bytes, import.module_name);
if (!maybe_module_name.ToHandle(&module_name)) {
thrower_->LinkError("Could not resolve module name for import %zu",
@@ -2716,7 +2799,7 @@ void InstanceBuilder::SanitizeImports() {
Handle<String> import_name;
MaybeHandle<String> maybe_import_name =
- WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
isolate_, module_bytes, import.field_name);
if (!maybe_import_name.ToHandle(&import_name)) {
thrower_->LinkError("Could not resolve import name for import %zu",
@@ -2764,10 +2847,15 @@ Handle<FixedArray> InstanceBuilder::SetupWasmToJSImportsTable(
// functions.
int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
Handle<WasmInstanceObject> instance) {
+ using compiler::kFunctionTableSignatureOffset;
+ using compiler::kFunctionTableCodeOffset;
+ using compiler::kFunctionTableEntrySize;
int num_imported_functions = 0;
int num_imported_tables = 0;
Handle<FixedArray> js_imports_table = SetupWasmToJSImportsTable(instance);
WasmInstanceMap imported_wasm_instances(isolate_->heap());
+ SetOfNativeModuleModificationScopes set_of_native_module_scopes;
+
DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
for (int index = 0; index < static_cast<int>(module_->import_table.size());
++index) {
@@ -2842,19 +2930,18 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
}
}
- // Allocate a new dispatch table and signature table.
- int table_size = imported_cur_size;
+ // Allocate a new dispatch table, containing <smi(sig), code> pairs.
+ CHECK_GE(kMaxInt / kFunctionTableEntrySize, imported_cur_size);
+ int table_size = kFunctionTableEntrySize * imported_cur_size;
table_instance.function_table =
isolate_->factory()->NewFixedArray(table_size);
- table_instance.signature_table =
- isolate_->factory()->NewFixedArray(table_size);
- for (int i = 0; i < table_size; ++i) {
- table_instance.signature_table->set(i,
- Smi::FromInt(kInvalidSigIndex));
+ for (int i = kFunctionTableSignatureOffset; i < table_size;
+ i += kFunctionTableEntrySize) {
+ table_instance.function_table->set(i, Smi::FromInt(kInvalidSigIndex));
}
// Initialize the dispatch table with the (foreign) JS functions
// that are already in the table.
- for (int i = 0; i < table_size; ++i) {
+ for (int i = 0; i < imported_cur_size; ++i) {
Handle<Object> val(table_instance.js_wrappers->get(i), isolate_);
// TODO(mtrofin): this is the same logic as WasmTableObject::Set:
// insert in the local table a wrapper from the other module, and add
@@ -2876,8 +2963,10 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
&imported_wasm_instances, instance, 0)
.GetCode();
int sig_index = module_->signature_map.Find(sig);
- table_instance.signature_table->set(i, Smi::FromInt(sig_index));
- table_instance.function_table->set(i, *code);
+ table_instance.function_table->set(
+ compiler::FunctionTableSigOffset(i), Smi::FromInt(sig_index));
+ table_instance.function_table->set(
+ compiler::FunctionTableCodeOffset(i), *code);
} else {
const wasm::WasmCode* exported_code =
target->GetWasmCode().GetWasmCode();
@@ -2896,14 +2985,17 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
Handle<Code> wrapper = compiler::CompileWasmToWasmWrapper(
isolate_, target->GetWasmCode(), sig,
reinterpret_cast<Address>(other_context));
+ set_of_native_module_scopes.Add(exporting_module);
wrapper_code = exporting_module->AddExportedWrapper(
wrapper, exported_code->index());
}
int sig_index = module_->signature_map.Find(sig);
- table_instance.signature_table->set(i, Smi::FromInt(sig_index));
Handle<Foreign> foreign_holder = isolate_->factory()->NewForeign(
wrapper_code->instructions().start(), TENURED);
- table_instance.function_table->set(i, *foreign_holder);
+ table_instance.function_table->set(
+ compiler::FunctionTableSigOffset(i), Smi::FromInt(sig_index));
+ table_instance.function_table->set(
+ compiler::FunctionTableCodeOffset(i), *foreign_holder);
}
}
@@ -2924,7 +3016,7 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
memory_ = buffer;
uint32_t imported_cur_pages = static_cast<uint32_t>(
- buffer->byte_length()->Number() / WasmModule::kPageSize);
+ buffer->byte_length()->Number() / kWasmPageSize);
if (imported_cur_pages < module_->initial_pages) {
thrower_->LinkError(
"memory import %d is smaller than initial %u, got %u", index,
@@ -3060,9 +3152,12 @@ Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t num_pages) {
thrower_->RangeError("Out of memory: wasm memory too large");
return Handle<JSArrayBuffer>::null();
}
- const bool enable_guard_regions = trap_handler::UseTrapHandler();
+ const bool enable_guard_regions = use_trap_handler();
+ const bool is_shared_memory =
+ module_->has_shared_memory && i::FLAG_experimental_wasm_threads;
Handle<JSArrayBuffer> mem_buffer = NewArrayBuffer(
- isolate_, num_pages * WasmModule::kPageSize, enable_guard_regions);
+ isolate_, num_pages * kWasmPageSize, enable_guard_regions,
+ is_shared_memory ? i::SharedFlag::kShared : i::SharedFlag::kNotShared);
if (mem_buffer.is_null()) {
thrower_->RangeError("Out of memory: wasm memory");
@@ -3086,7 +3181,8 @@ bool InstanceBuilder::NeedsWrappers() const {
void InstanceBuilder::ProcessExports(
Handle<WasmInstanceObject> instance,
Handle<WasmCompiledModule> compiled_module) {
- Handle<FixedArray> wrapper_table = compiled_module->export_wrappers();
+ Handle<FixedArray> wrapper_table(compiled_module->export_wrappers(),
+ isolate_);
if (NeedsWrappers()) {
// Fill the table to cache the exported JSFunction wrappers.
js_wrappers_.insert(js_wrappers_.begin(), module_->functions.size(),
@@ -3117,22 +3213,24 @@ void InstanceBuilder::ProcessExports(
// Store weak references to all exported functions.
Handle<FixedArray> weak_exported_functions;
if (compiled_module->has_weak_exported_functions()) {
- weak_exported_functions = compiled_module->weak_exported_functions();
+ weak_exported_functions =
+ handle(compiled_module->weak_exported_functions(), isolate_);
} else {
int export_count = 0;
for (WasmExport& exp : module_->export_table) {
if (exp.kind == kExternalFunction) ++export_count;
}
weak_exported_functions = isolate_->factory()->NewFixedArray(export_count);
- compiled_module->set_weak_exported_functions(weak_exported_functions);
+ compiled_module->set_weak_exported_functions(*weak_exported_functions);
}
// Process each export in the export table.
int export_index = 0; // Index into {weak_exported_functions}.
for (WasmExport& exp : module_->export_table) {
- Handle<String> name = WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate_, compiled_module_, exp.name)
- .ToHandleChecked();
+ Handle<String> name =
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
+ isolate_, handle(compiled_module_->shared(), isolate_), exp.name)
+ .ToHandleChecked();
Handle<JSObject> export_to;
if (module_->is_asm_js() && exp.kind == kExternalFunction &&
String::Equals(name, single_function_name)) {
@@ -3153,9 +3251,11 @@ void InstanceBuilder::ProcessExports(
MaybeHandle<String> func_name;
if (module_->is_asm_js()) {
// For modules arising from asm.js, honor the names section.
- func_name = WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate_, compiled_module_, function.name)
- .ToHandleChecked();
+ func_name =
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
+ isolate_, handle(compiled_module_->shared(), isolate_),
+ function.name)
+ .ToHandleChecked();
}
js_function = WasmExportedFunction::New(
isolate_, instance, func_name, function.func_index,
@@ -3246,21 +3346,16 @@ void InstanceBuilder::InitializeTables(
CodeSpecialization* code_specialization) {
size_t function_table_count = module_->function_tables.size();
std::vector<GlobalHandleAddress> new_function_tables(function_table_count);
- std::vector<GlobalHandleAddress> new_signature_tables(function_table_count);
wasm::NativeModule* native_module = compiled_module_->GetNativeModule();
std::vector<GlobalHandleAddress> empty;
std::vector<GlobalHandleAddress>& old_function_tables =
FLAG_wasm_jit_to_native ? native_module->function_tables() : empty;
- std::vector<GlobalHandleAddress>& old_signature_tables =
- FLAG_wasm_jit_to_native ? native_module->signature_tables() : empty;
Handle<FixedArray> old_function_tables_gc =
- FLAG_wasm_jit_to_native ? Handle<FixedArray>::null()
- : compiled_module_->function_tables();
- Handle<FixedArray> old_signature_tables_gc =
- FLAG_wasm_jit_to_native ? Handle<FixedArray>::null()
- : compiled_module_->signature_tables();
+ FLAG_wasm_jit_to_native
+ ? Handle<FixedArray>::null()
+ : handle(compiled_module_->function_tables(), isolate_);
// function_table_count is 0 or 1, so we just create these objects even if not
// needed for native wasm.
@@ -3269,58 +3364,52 @@ void InstanceBuilder::InitializeTables(
Handle<FixedArray> new_function_tables_gc =
isolate_->factory()->NewFixedArray(static_cast<int>(function_table_count),
TENURED);
- Handle<FixedArray> new_signature_tables_gc =
- isolate_->factory()->NewFixedArray(static_cast<int>(function_table_count),
- TENURED);
// These go on the instance.
Handle<FixedArray> rooted_function_tables =
isolate_->factory()->NewFixedArray(static_cast<int>(function_table_count),
TENURED);
- Handle<FixedArray> rooted_signature_tables =
- isolate_->factory()->NewFixedArray(static_cast<int>(function_table_count),
- TENURED);
instance->set_function_tables(*rooted_function_tables);
- instance->set_signature_tables(*rooted_signature_tables);
if (FLAG_wasm_jit_to_native) {
DCHECK_EQ(old_function_tables.size(), new_function_tables.size());
- DCHECK_EQ(old_signature_tables.size(), new_signature_tables.size());
} else {
DCHECK_EQ(old_function_tables_gc->length(),
new_function_tables_gc->length());
- DCHECK_EQ(old_signature_tables_gc->length(),
- new_signature_tables_gc->length());
}
for (size_t index = 0; index < function_table_count; ++index) {
WasmIndirectFunctionTable& table = module_->function_tables[index];
TableInstance& table_instance = table_instances_[index];
- int table_size = static_cast<int>(table.initial_size);
+ // The table holds <smi(sig), code> pairs.
+ CHECK_GE(kMaxInt / compiler::kFunctionTableEntrySize, table.initial_size);
+ int num_table_entries = static_cast<int>(table.initial_size);
+ int table_size = compiler::kFunctionTableEntrySize * num_table_entries;
if (table_instance.function_table.is_null()) {
// Create a new dispatch table if necessary.
table_instance.function_table =
isolate_->factory()->NewFixedArray(table_size);
- table_instance.signature_table =
- isolate_->factory()->NewFixedArray(table_size);
- for (int i = 0; i < table_size; ++i) {
+ for (int i = compiler::kFunctionTableSignatureOffset; i < table_size;
+ i += compiler::kFunctionTableEntrySize) {
// Fill the table with invalid signature indexes so that
// uninitialized entries will always fail the signature check.
- table_instance.signature_table->set(i, Smi::FromInt(kInvalidSigIndex));
+ table_instance.function_table->set(i, Smi::FromInt(kInvalidSigIndex));
}
} else {
// Table is imported, patch table bounds check
- DCHECK_LE(table_size, table_instance.function_table->length());
- code_specialization->PatchTableSize(
- table_size, table_instance.function_table->length());
+ int existing_table_size = table_instance.function_table->length();
+ DCHECK_EQ(0, existing_table_size % compiler::kFunctionTableEntrySize);
+ int existing_num_table_entries =
+ existing_table_size / compiler::kFunctionTableEntrySize;
+ DCHECK_LE(num_table_entries, existing_num_table_entries);
+ code_specialization->PatchTableSize(num_table_entries,
+ existing_num_table_entries);
}
int int_index = static_cast<int>(index);
Handle<FixedArray> global_func_table =
isolate_->global_handles()->Create(*table_instance.function_table);
- Handle<FixedArray> global_sig_table =
- isolate_->global_handles()->Create(*table_instance.signature_table);
// Make the handles weak. The table objects are rooted on the instance, as
// they belong to it. We need the global handles in order to have stable
// pointers to embed in the instance's specialization (wasm compiled code).
@@ -3333,47 +3422,30 @@ void InstanceBuilder::InitializeTables(
reinterpret_cast<Object**>(global_func_table.location()),
global_func_table.location(), &FunctionTableFinalizer,
v8::WeakCallbackType::kFinalizer);
- GlobalHandles::MakeWeak(
- reinterpret_cast<Object**>(global_sig_table.location()),
- global_sig_table.location(), &FunctionTableFinalizer,
- v8::WeakCallbackType::kFinalizer);
rooted_function_tables->set(int_index, *global_func_table);
- rooted_signature_tables->set(int_index, *global_sig_table);
GlobalHandleAddress new_func_table_addr = global_func_table.address();
- GlobalHandleAddress new_sig_table_addr = global_sig_table.address();
GlobalHandleAddress old_func_table_addr;
- GlobalHandleAddress old_sig_table_addr;
if (!FLAG_wasm_jit_to_native) {
WasmCompiledModule::SetTableValue(isolate_, new_function_tables_gc,
int_index, new_func_table_addr);
- WasmCompiledModule::SetTableValue(isolate_, new_signature_tables_gc,
- int_index, new_sig_table_addr);
old_func_table_addr =
WasmCompiledModule::GetTableValue(*old_function_tables_gc, int_index);
- old_sig_table_addr = WasmCompiledModule::GetTableValue(
- *old_signature_tables_gc, int_index);
} else {
new_function_tables[int_index] = new_func_table_addr;
- new_signature_tables[int_index] = new_sig_table_addr;
old_func_table_addr = old_function_tables[int_index];
- old_sig_table_addr = old_signature_tables[int_index];
}
code_specialization->RelocatePointer(old_func_table_addr,
new_func_table_addr);
- code_specialization->RelocatePointer(old_sig_table_addr,
- new_sig_table_addr);
}
if (FLAG_wasm_jit_to_native) {
native_module->function_tables() = new_function_tables;
- native_module->signature_tables() = new_signature_tables;
} else {
- compiled_module_->set_function_tables(new_function_tables_gc);
- compiled_module_->set_signature_tables(new_signature_tables_gc);
+ compiled_module_->set_function_tables(*new_function_tables_gc);
}
}
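// Illustrative sketch, not part of this patch: the interleaved function-table
// layout assumed by the hunks above. Each logical entry {i} occupies two
// consecutive FixedArray slots, a Smi-tagged signature id followed by the code
// entry; the compiler:: helpers used above reduce to this arithmetic.
constexpr int kFunctionTableSignatureOffset = 0;
constexpr int kFunctionTableCodeOffset = 1;
constexpr int kFunctionTableEntrySize = 2;  // <smi(sig), code> pair per entry
inline int FunctionTableSigOffset(int i) {
  return kFunctionTableEntrySize * i + kFunctionTableSignatureOffset;
}
inline int FunctionTableCodeOffset(int i) {
  return kFunctionTableEntrySize * i + kFunctionTableCodeOffset;
}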
@@ -3384,13 +3456,6 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
for (int index = 0; index < function_table_count; ++index) {
TableInstance& table_instance = table_instances_[index];
- Handle<FixedArray> all_dispatch_tables;
- if (!table_instance.table_object.is_null()) {
- // Get the existing dispatch table(s) with the WebAssembly.Table object.
- all_dispatch_tables =
- handle(table_instance.table_object->dispatch_tables());
- }
-
// Count the number of table exports for each function (needed for lazy
// compilation).
std::unordered_map<uint32_t, uint32_t> num_table_exports;
@@ -3402,14 +3467,20 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
Code::cast(code_table->get(static_cast<int>(func_index)));
// Only increase the counter for lazy compile builtins (it's not
// needed otherwise).
- if (code->is_wasm_code()) continue;
- DCHECK_EQ(Builtins::kWasmCompileLazy, code->builtin_index());
+ if (code->builtin_index() != Builtins::kWasmCompileLazy) {
+ DCHECK(code->kind() == Code::WASM_FUNCTION ||
+ code->kind() == Code::WASM_TO_JS_FUNCTION);
+ continue;
+ }
} else {
const wasm::WasmCode* code = native_module->GetCode(func_index);
// Only increase the counter for lazy compile builtins (it's not
// needed otherwise).
- if (code->kind() == wasm::WasmCode::Function) continue;
- DCHECK_EQ(wasm::WasmCode::LazyStub, code->kind());
+ if (code->kind() != wasm::WasmCode::kLazyStub) {
+ DCHECK(code->kind() == wasm::WasmCode::kFunction ||
+ code->kind() == wasm::WasmCode::kWasmToJsWrapper);
+ continue;
+ }
}
++num_table_exports[func_index];
}
@@ -3422,14 +3493,16 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
uint32_t base = EvalUint32InitExpr(table_init.offset);
uint32_t num_entries = static_cast<uint32_t>(table_init.entries.size());
DCHECK(in_bounds(base, num_entries,
- table_instance.function_table->length()));
+ table_instance.function_table->length() /
+ compiler::kFunctionTableEntrySize));
for (uint32_t i = 0; i < num_entries; ++i) {
uint32_t func_index = table_init.entries[i];
WasmFunction* function = &module_->functions[func_index];
int table_index = static_cast<int>(i + base);
uint32_t sig_index = module_->signature_ids[function->sig_index];
- table_instance.signature_table->set(table_index,
- Smi::FromInt(sig_index));
+ table_instance.function_table->set(
+ compiler::FunctionTableSigOffset(table_index),
+ Smi::FromInt(sig_index));
WasmCodeWrapper wasm_code = EnsureTableExportLazyDeoptData(
isolate_, instance, code_table, native_module, func_index,
table_instance.function_table, table_index, &num_table_exports);
@@ -3437,13 +3510,14 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
if (!wasm_code.IsCodeObject()) {
Handle<Foreign> as_foreign = isolate_->factory()->NewForeign(
wasm_code.GetWasmCode()->instructions().start(), TENURED);
- table_instance.function_table->set(table_index, *as_foreign);
value_to_update_with = as_foreign;
} else {
- table_instance.function_table->set(table_index, *wasm_code.GetCode());
value_to_update_with = wasm_code.GetCode();
}
- if (!all_dispatch_tables.is_null()) {
+ table_instance.function_table->set(
+ compiler::FunctionTableCodeOffset(table_index),
+ *value_to_update_with);
+ if (!table_instance.table_object.is_null()) {
if (js_wrappers_[func_index].is_null()) {
// No JSFunction entry yet exists for this function. Create one.
// TODO(titzer): We compile JS->wasm wrappers for functions are
@@ -3452,13 +3526,16 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
Handle<Code> wrapper_code =
js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
- isolate_, module_, wasm_code, func_index);
+ isolate_, module_, wasm_code, func_index,
+ instance->compiled_module()->use_trap_handler());
MaybeHandle<String> func_name;
if (module_->is_asm_js()) {
// For modules arising from asm.js, honor the names section.
- func_name = WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate_, compiled_module_, function->name)
- .ToHandleChecked();
+ func_name =
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
+ isolate_, handle(compiled_module_->shared(), isolate_),
+ function->name)
+ .ToHandleChecked();
}
Handle<WasmExportedFunction> js_function =
WasmExportedFunction::New(
@@ -3486,13 +3563,14 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
Code::WASM_TO_WASM_FUNCTION);
} else {
DCHECK(wasm_code.GetWasmCode()->kind() ==
- WasmCode::WasmToJsWrapper ||
+ WasmCode::kWasmToJsWrapper ||
wasm_code.GetWasmCode()->kind() ==
- WasmCode::WasmToWasmWrapper);
+ WasmCode::kWasmToWasmWrapper);
}
}
- UpdateDispatchTables(isolate_, all_dispatch_tables, table_index,
- function, value_to_update_with);
+ WasmTableObject::UpdateDispatchTables(table_instance.table_object,
+ table_index, function->sig,
+ value_to_update_with);
}
}
}
@@ -3510,9 +3588,9 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
// initialized.
if (!table_instance.table_object.is_null()) {
// Add the new dispatch table to the WebAssembly.Table object.
- all_dispatch_tables = WasmTableObject::AddDispatchTable(
- isolate_, table_instance.table_object, instance, index,
- table_instance.function_table, table_instance.signature_table);
+ WasmTableObject::AddDispatchTable(isolate_, table_instance.table_object,
+ instance, index,
+ table_instance.function_table);
}
}
}
@@ -3544,7 +3622,7 @@ void AsyncCompileJob::Abort() {
background_task_manager_.CancelAndWait();
if (num_pending_foreground_tasks_ == 0) {
// No task is pending, we can just remove the AsyncCompileJob.
- isolate_->wasm_compilation_manager()->RemoveJob(this);
+ isolate_->wasm_engine()->compilation_manager()->RemoveJob(this);
} else {
// There is still a compilation task in the task queue. We enter the
// AbortCompilation state and wait for this compilation task to abort the
@@ -3582,6 +3660,8 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
  // Finishes the AsyncCompileJob with an error.
void FinishAsyncCompileJobWithError(ResultBase result);
+ void CommitCompilationUnits();
+
ModuleDecoder decoder_;
AsyncCompileJob* job_;
std::unique_ptr<ModuleCompiler::CompilationUnitBuilder>
@@ -3605,14 +3685,14 @@ void AsyncCompileJob::AsyncCompileFailed(ErrorThrower& thrower) {
if (stream_) stream_->NotifyError();
// {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job =
- isolate_->wasm_compilation_manager()->RemoveJob(this);
+ isolate_->wasm_engine()->compilation_manager()->RemoveJob(this);
RejectPromise(isolate_, context_, thrower, module_promise_);
}
void AsyncCompileJob::AsyncCompileSucceeded(Handle<Object> result) {
// {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job =
- isolate_->wasm_compilation_manager()->RemoveJob(this);
+ isolate_->wasm_engine()->compilation_manager()->RemoveJob(this);
ResolvePromise(isolate_, context_, module_promise_, result);
}
@@ -3888,7 +3968,6 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
DisallowHeapAllocation no_allocation;
if (!job_->compiler_->FetchAndExecuteCompilationUnit(
StartFinishCompilationUnit)) {
- finished_ = true;
break;
}
}
@@ -3913,7 +3992,7 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
double deadline = MonotonicallyIncreasingTimeInMs() + 1.0;
while (true) {
- if (!finished_ && job_->compiler_->ShouldIncreaseWorkload()) {
+ if (job_->compiler_->ShouldIncreaseWorkload()) {
job_->RestartBackgroundTasks();
}
@@ -3969,7 +4048,6 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
private:
std::atomic<bool> failed_{false};
- std::atomic<bool> finished_{false};
};
//==========================================================================
@@ -4026,7 +4104,7 @@ class AsyncCompileJob::FinishCompile : public CompileStep {
// Finish the wasm script now and make it public to the debugger.
job_->isolate_->debug()->OnAfterCompile(
- handle(job_->compiled_module_->script()));
+ handle(job_->compiled_module_->shared()->script()));
// TODO(wasm): compiling wrappers should be made async as well.
job_->DoSync<CompileWrappers>();
@@ -4066,7 +4144,7 @@ class AsyncCompileJob::FinishModule : public CompileStep {
class AsyncCompileJob::AbortCompilation : public CompileStep {
void RunInForeground() override {
TRACE_COMPILE("Abort asynchronous compilation ...\n");
- job_->isolate_->wasm_compilation_manager()->RemoveJob(job_);
+ job_->isolate_->wasm_engine()->compilation_manager()->RemoveJob(job_);
}
};
@@ -4095,7 +4173,10 @@ void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(ResultBase error) {
job_->DoSync<AsyncCompileJob::DecodeFail>(std::move(result));
}
- compilation_unit_builder_->Clear();
+ // Clear the {compilation_unit_builder_} if it exists. This is needed
+ // because there is a check in the destructor of the
+ // {CompilationUnitBuilder} that it is empty.
+ if (compilation_unit_builder_) compilation_unit_builder_->Clear();
} else {
job_->DoSync<AsyncCompileJob::DecodeFail>(std::move(result));
}
@@ -4119,6 +4200,12 @@ bool AsyncStreamingProcessor::ProcessSection(SectionCode section_code,
Vector<const uint8_t> bytes,
uint32_t offset) {
TRACE_STREAMING("Process section %d ...\n", section_code);
+ if (compilation_unit_builder_) {
+ // We reached a section after the code section; we do not need the
+ // compilation_unit_builder_ anymore.
+ CommitCompilationUnits();
+ compilation_unit_builder_.reset();
+ }
if (section_code == SectionCode::kUnknownSectionCode) {
// No need to decode unknown sections, even the names section. If decoding
// of the unknown section fails, compilation should succeed anyways, and
@@ -4186,14 +4273,19 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
return true;
}
-void AsyncStreamingProcessor::OnFinishedChunk() {
- // TRACE_STREAMING("FinishChunk...\n");
- if (compilation_unit_builder_) {
- compilation_unit_builder_->Commit();
+void AsyncStreamingProcessor::CommitCompilationUnits() {
+ DCHECK(compilation_unit_builder_);
+ if (compilation_unit_builder_->Commit()) {
+ // Only restart background tasks when compilation units were committed.
job_->RestartBackgroundTasks();
}
}
+void AsyncStreamingProcessor::OnFinishedChunk() {
+ TRACE_STREAMING("FinishChunk...\n");
+ if (compilation_unit_builder_) CommitCompilationUnits();
+}
+
// Finish the processing of the stream.
void AsyncStreamingProcessor::OnFinishedStream(std::unique_ptr<uint8_t[]> bytes,
size_t length) {
@@ -4234,15 +4326,18 @@ void CompileJsToWasmWrappers(Isolate* isolate,
Counters* counters) {
JSToWasmWrapperCache js_to_wasm_cache;
int wrapper_index = 0;
- Handle<FixedArray> export_wrappers = compiled_module->export_wrappers();
+ Handle<FixedArray> export_wrappers(compiled_module->export_wrappers(),
+ isolate);
+ Handle<FixedArray> code_table(compiled_module->code_table(), isolate);
NativeModule* native_module = compiled_module->GetNativeModule();
- for (auto exp : compiled_module->module()->export_table) {
+ for (auto exp : compiled_module->shared()->module()->export_table) {
if (exp.kind != kExternalFunction) continue;
- WasmCodeWrapper wasm_code = EnsureExportedLazyDeoptData(
- isolate, Handle<WasmInstanceObject>::null(),
- compiled_module->code_table(), native_module, exp.index);
+ WasmCodeWrapper wasm_code =
+ EnsureExportedLazyDeoptData(isolate, Handle<WasmInstanceObject>::null(),
+ code_table, native_module, exp.index);
Handle<Code> wrapper_code = js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(
- isolate, compiled_module->module(), wasm_code, exp.index);
+ isolate, compiled_module->shared()->module(), wasm_code, exp.index,
+ compiled_module->use_trap_handler());
export_wrappers->set(wrapper_index, *wrapper_code);
RecordStats(*wrapper_code, counters);
++wrapper_index;
@@ -4283,6 +4378,7 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
} // namespace internal
} // namespace v8
+#undef WasmPatchWasmToWasmWrapper
#undef TRACE
#undef TRACE_CHAIN
#undef TRACE_COMPILE
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 864af287cf..3a8b1972d6 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -23,9 +23,6 @@ namespace wasm {
class ModuleCompiler;
class WasmCode;
-V8_EXPORT_PRIVATE bool SyncValidate(Isolate* isolate,
- const ModuleWireBytes& bytes);
-
V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
Handle<Script> asm_js_script, Vector<const byte> asm_js_offset_table_bytes);
@@ -43,7 +40,8 @@ V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncCompileAndInstantiate(
MaybeHandle<JSReceiver> imports, MaybeHandle<JSArrayBuffer> memory);
V8_EXPORT_PRIVATE void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
- const ModuleWireBytes& bytes);
+ const ModuleWireBytes& bytes,
+ bool is_shared);
V8_EXPORT_PRIVATE void AsyncInstantiate(Isolate* isolate,
Handle<JSPromise> promise,
@@ -95,6 +93,20 @@ class LazyCompilationOrchestrator {
const wasm::WasmCode* CompileIndirectCall(Isolate*,
Handle<WasmInstanceObject>,
uint32_t func_index);
+
+#ifdef DEBUG
+ // Call this method in tests to disallow any further lazy compilation; then
+ // call into the wasm instance again to verify that no lazy compilation is
+ // triggered.
+ void FreezeLazyCompilationForTesting() { frozen_ = true; }
+ bool IsFrozenForTesting() const { return frozen_; }
+
+ private:
+ bool frozen_;
+#else
+ void FreezeLazyCompilationForTesting() {}
+ bool IsFrozenForTesting() { return false; }
+#endif
};
// Encapsulates all the state and steps of an asynchronous compilation.
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 1176c56935..010f191263 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -21,14 +21,11 @@ namespace v8 {
namespace internal {
namespace wasm {
-#if DEBUG
#define TRACE(...) \
do { \
if (FLAG_trace_wasm_decoder) PrintF(__VA_ARGS__); \
} while (false)
-#else
-#define TRACE(...)
-#endif
+
namespace {
constexpr char kNameString[] = "name";
@@ -40,6 +37,20 @@ constexpr size_t num_chars(const char (&)[N]) {
return N - 1; // remove null character at end.
}
+const char* ExternalKindName(ImportExportKindCode kind) {
+ switch (kind) {
+ case kExternalFunction:
+ return "function";
+ case kExternalTable:
+ return "table";
+ case kExternalMemory:
+ return "memory";
+ case kExternalGlobal:
+ return "global";
+ }
+ return "unknown";
+}
+
} // namespace
const char* SectionName(SectionCode code) {
@@ -299,7 +310,7 @@ class ModuleDecoderImpl : public Decoder {
const byte* pos = pc_;
uint32_t magic_word = consume_u32("wasm magic");
-#define BYTES(x) (x & 0xff), (x >> 8) & 0xff, (x >> 16) & 0xff, (x >> 24) & 0xff
+#define BYTES(x) (x & 0xFF), (x >> 8) & 0xFF, (x >> 16) & 0xFF, (x >> 24) & 0xFF
if (magic_word != kWasmMagic) {
errorf(pos,
"expected magic word %02x %02x %02x %02x, "
@@ -446,7 +457,8 @@ class ModuleDecoderImpl : public Decoder {
const byte* pos = pc_;
import->module_name = consume_string(true, "module name");
import->field_name = consume_string(true, "field name");
- import->kind = static_cast<WasmExternalKind>(consume_u8("import kind"));
+ import->kind =
+ static_cast<ImportExportKindCode>(consume_u8("import kind"));
switch (import->kind) {
case kExternalFunction: {
// ===== Imported function =======================================
@@ -472,7 +484,7 @@ class ModuleDecoderImpl : public Decoder {
module_->function_tables.emplace_back();
WasmIndirectFunctionTable* table = &module_->function_tables.back();
table->imported = true;
- expect_u8("element type", kWasmAnyFunctionTypeForm);
+ expect_u8("element type", kWasmAnyFunctionTypeCode);
consume_resizable_limits(
"element count", "elements", FLAG_wasm_max_table_size,
&table->initial_size, &table->has_maximum_size,
@@ -538,7 +550,7 @@ class ModuleDecoderImpl : public Decoder {
if (!AddTable(module_.get())) break;
module_->function_tables.emplace_back();
WasmIndirectFunctionTable* table = &module_->function_tables.back();
- expect_u8("table type", kWasmAnyFunctionTypeForm);
+ expect_u8("table type", kWasmAnyFunctionTypeCode);
consume_resizable_limits("table elements", "elements",
FLAG_wasm_max_table_size, &table->initial_size,
&table->has_maximum_size,
@@ -591,7 +603,7 @@ class ModuleDecoderImpl : public Decoder {
exp->name = consume_string(true, "field name");
const byte* pos = pc();
- exp->kind = static_cast<WasmExternalKind>(consume_u8("export kind"));
+ exp->kind = static_cast<ImportExportKindCode>(consume_u8("export kind"));
switch (exp->kind) {
case kExternalFunction: {
WasmFunction* func = nullptr;
@@ -784,12 +796,12 @@ class ModuleDecoderImpl : public Decoder {
// Decode function names, ignore the rest.
// Local names will be decoded when needed.
switch (name_type) {
- case NameSectionType::kModule: {
+ case NameSectionKindCode::kModule: {
WireBytesRef name = wasm::consume_string(inner, false, "module name");
if (inner.ok() && validate_utf8(&inner, name)) module_->name = name;
break;
}
- case NameSectionType::kFunction: {
+ case NameSectionKindCode::kFunction: {
uint32_t functions_count = inner.consume_u32v("functions count");
for (; inner.ok() && functions_count > 0; --functions_count) {
@@ -1118,7 +1130,7 @@ class ModuleDecoderImpl : public Decoder {
if (FLAG_experimental_wasm_threads) {
bool is_memory = (strcmp(name, "memory") == 0);
- if (flags & 0xfc || (!is_memory && (flags & 0xfe))) {
+ if (flags & 0xFC || (!is_memory && (flags & 0xFE))) {
errorf(pos - 1, "invalid %s limits flags", name);
}
if (flags == 3) {
@@ -1130,7 +1142,7 @@ class ModuleDecoderImpl : public Decoder {
name);
}
} else {
- if (flags & 0xfe) {
+ if (flags & 0xFE) {
errorf(pos - 1, "invalid %s limits flags", name);
}
}
@@ -1292,7 +1304,7 @@ class ModuleDecoderImpl : public Decoder {
private:
FunctionSig* consume_sig_internal(Zone* zone, bool has_return_values) {
- if (has_return_values && !expect_u8("type form", kWasmFunctionTypeForm))
+ if (has_return_values && !expect_u8("type form", kWasmFunctionTypeCode))
return nullptr;
// parse parameter types
uint32_t param_count =
@@ -1586,7 +1598,7 @@ void DecodeLocalNames(const byte* module_start, const byte* module_end,
uint32_t name_payload_len = decoder.consume_u32v("name payload length");
if (!decoder.checkAvailable(name_payload_len)) break;
- if (name_type != NameSectionType::kLocal) {
+ if (name_type != NameSectionKindCode::kLocal) {
decoder.consume_bytes(name_payload_len, "name subsection payload");
continue;
}
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 8b36205ed3..f98a5ed66d 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -7,6 +7,7 @@
#include "src/globals.h"
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-result.h"
@@ -19,41 +20,6 @@ struct ModuleEnv;
namespace wasm {
-const uint8_t kWasmFunctionTypeForm = 0x60;
-const uint8_t kWasmAnyFunctionTypeForm = 0x70;
-const uint8_t kHasMaximumFlag = 1;
-const uint8_t kNoMaximumFlag = 0;
-
-enum MemoryFlags : uint8_t {
- kNoMaximum = 0,
- kMaximum = 1,
- kSharedNoMaximum = 2,
- kSharedAndMaximum = 3
-};
-
-enum SectionCode : int8_t {
- kUnknownSectionCode = 0, // code for unknown sections
- kTypeSectionCode = 1, // Function signature declarations
- kImportSectionCode = 2, // Import declarations
- kFunctionSectionCode = 3, // Function declarations
- kTableSectionCode = 4, // Indirect function table and other tables
- kMemorySectionCode = 5, // Memory attributes
- kGlobalSectionCode = 6, // Global declarations
- kExportSectionCode = 7, // Exports
- kStartSectionCode = 8, // Start function declaration
- kElementSectionCode = 9, // Elements section
- kCodeSectionCode = 10, // Function code
- kDataSectionCode = 11, // Data segments
- kNameSectionCode = 12, // Name section (encoded as a string)
- kExceptionSectionCode = 13, // Exception section
-
- // Helper values
- kFirstSectionInModule = kTypeSectionCode,
- kLastKnownModuleSection = kExceptionSectionCode,
-};
-
-enum NameSectionType : uint8_t { kModule = 0, kFunction = 1, kLocal = 2 };
-
inline bool IsValidSectionCode(uint8_t byte) {
return kTypeSectionCode <= byte && byte <= kLastKnownModuleSection;
}
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index 2387edba34..1b5eaab332 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -66,7 +66,7 @@ void StreamingDecoder::Finish() {
std::unique_ptr<uint8_t[]> bytes(new uint8_t[total_size_]);
uint8_t* cursor = bytes.get();
{
-#define BYTES(x) (x & 0xff), (x >> 8) & 0xff, (x >> 16) & 0xff, (x >> 24) & 0xff
+#define BYTES(x) (x & 0xFF), (x >> 8) & 0xFF, (x >> 16) & 0xFF, (x >> 24) & 0xFF
uint8_t module_header[]{BYTES(kWasmMagic), BYTES(kWasmVersion)};
#undef BYTES
memcpy(cursor, module_header, arraysize(module_header));
@@ -92,7 +92,9 @@ class StreamingDecoder::DecodeVarInt32 : public DecodingState {
public:
explicit DecodeVarInt32(size_t max_value, const char* field_name)
: max_value_(max_value), field_name_(field_name) {}
+
uint8_t* buffer() override { return byte_buffer_; }
+
size_t size() const override { return kMaxVarInt32Size; }
size_t ReadBytes(StreamingDecoder* streaming,
@@ -103,10 +105,7 @@ class StreamingDecoder::DecodeVarInt32 : public DecodingState {
virtual std::unique_ptr<DecodingState> NextWithValue(
StreamingDecoder* streaming) = 0;
- size_t value() const { return value_; }
- size_t bytes_consumed() const { return bytes_consumed_; }
-
- private:
+ protected:
uint8_t byte_buffer_[kMaxVarInt32Size];
// The maximum valid value decoded in this state. {Next} returns an error if
// this value is exceeded.
@@ -141,10 +140,6 @@ class StreamingDecoder::DecodeSectionID : public DecodingState {
uint8_t* buffer() override { return &id_; }
bool is_finishing_allowed() const override { return true; }
- uint8_t id() const { return id_; }
-
- uint32_t module_offset() const { return module_offset_; }
-
std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
private:
@@ -160,10 +155,6 @@ class StreamingDecoder::DecodeSectionLength : public DecodeVarInt32 {
section_id_(id),
module_offset_(module_offset) {}
- uint8_t section_id() const { return section_id_; }
-
- uint32_t module_offset() const { return module_offset_; }
-
std::unique_ptr<DecodingState> NextWithValue(
StreamingDecoder* streaming) override;
@@ -179,14 +170,13 @@ class StreamingDecoder::DecodeSectionPayload : public DecodingState {
: section_buffer_(section_buffer) {}
size_t size() const override { return section_buffer_->payload_length(); }
+
uint8_t* buffer() override {
return section_buffer_->bytes() + section_buffer_->payload_offset();
}
std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
- SectionBuffer* section_buffer() const { return section_buffer_; }
-
private:
SectionBuffer* section_buffer_;
};
@@ -197,8 +187,6 @@ class StreamingDecoder::DecodeNumberOfFunctions : public DecodeVarInt32 {
: DecodeVarInt32(kV8MaxWasmFunctions, "functions count"),
section_buffer_(section_buffer) {}
- SectionBuffer* section_buffer() const { return section_buffer_; }
-
std::unique_ptr<DecodingState> NextWithValue(
StreamingDecoder* streaming) override;
@@ -219,10 +207,6 @@ class StreamingDecoder::DecodeFunctionLength : public DecodeVarInt32 {
DCHECK_GT(num_remaining_functions, 0);
}
- size_t num_remaining_functions() const { return num_remaining_functions_; }
- size_t buffer_offset() const { return buffer_offset_; }
- SectionBuffer* section_buffer() const { return section_buffer_; }
-
std::unique_ptr<DecodingState> NextWithValue(
StreamingDecoder* streaming) override;
@@ -244,14 +228,11 @@ class StreamingDecoder::DecodeFunctionBody : public DecodingState {
num_remaining_functions_(num_remaining_functions),
module_offset_(module_offset) {}
- size_t buffer_offset() const { return buffer_offset_; }
size_t size() const override { return size_; }
+
uint8_t* buffer() override {
return section_buffer_->bytes() + buffer_offset_;
}
- size_t num_remaining_functions() const { return num_remaining_functions_; }
- uint32_t module_offset() const { return module_offset_; }
- SectionBuffer* section_buffer() const { return section_buffer_; }
std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
@@ -297,9 +278,9 @@ StreamingDecoder::DecodeVarInt32::Next(StreamingDecoder* streaming) {
if (!streaming->ok()) {
return nullptr;
}
- if (value() > max_value_) {
+ if (value_ > max_value_) {
std::ostringstream oss;
- oss << "function size > maximum function size: " << value() << " < "
+ oss << "function size > maximum function size: " << value_ << " < "
<< max_value_;
return streaming->Error(oss.str());
}
@@ -320,32 +301,32 @@ StreamingDecoder::DecodeModuleHeader::Next(StreamingDecoder* streaming) {
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionID::Next(StreamingDecoder* streaming) {
TRACE_STREAMING("DecodeSectionID: %s section\n",
- SectionName(static_cast<SectionCode>(id())));
- return base::make_unique<DecodeSectionLength>(id(), module_offset());
+ SectionName(static_cast<SectionCode>(id_)));
+ return base::make_unique<DecodeSectionLength>(id_, module_offset_);
}
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionLength::NextWithValue(
StreamingDecoder* streaming) {
- TRACE_STREAMING("DecodeSectionLength(%zu)\n", value());
+ TRACE_STREAMING("DecodeSectionLength(%zu)\n", value_);
SectionBuffer* buf = streaming->CreateNewBuffer(
- module_offset(), section_id(), value(),
- Vector<const uint8_t>(buffer(), static_cast<int>(bytes_consumed())));
+ module_offset_, section_id_, value_,
+ Vector<const uint8_t>(buffer(), static_cast<int>(bytes_consumed_)));
if (!buf) return nullptr;
- if (value() == 0) {
- if (section_id() == SectionCode::kCodeSectionCode) {
+ if (value_ == 0) {
+ if (section_id_ == SectionCode::kCodeSectionCode) {
return streaming->Error("Code section cannot have size 0");
} else {
streaming->ProcessSection(buf);
if (streaming->ok()) {
// There is no payload, we go to the next section immediately.
- return base::make_unique<DecodeSectionID>(streaming->module_offset());
+ return base::make_unique<DecodeSectionID>(streaming->module_offset_);
} else {
return nullptr;
}
}
} else {
- if (section_id() == SectionCode::kCodeSectionCode) {
+ if (section_id_ == SectionCode::kCodeSectionCode) {
// We reached the code section. All functions of the code section are put
// into the same SectionBuffer.
return base::make_unique<DecodeNumberOfFunctions>(buf);
@@ -358,7 +339,7 @@ StreamingDecoder::DecodeSectionLength::NextWithValue(
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionPayload::Next(StreamingDecoder* streaming) {
TRACE_STREAMING("DecodeSectionPayload\n");
- streaming->ProcessSection(section_buffer());
+ streaming->ProcessSection(section_buffer_);
if (streaming->ok()) {
return base::make_unique<DecodeSectionID>(streaming->module_offset());
}
@@ -368,24 +349,24 @@ StreamingDecoder::DecodeSectionPayload::Next(StreamingDecoder* streaming) {
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
StreamingDecoder* streaming) {
- TRACE_STREAMING("DecodeNumberOfFunctions(%zu)\n", value());
+ TRACE_STREAMING("DecodeNumberOfFunctions(%zu)\n", value_);
// Copy the bytes we read into the section buffer.
- if (section_buffer()->payload_length() >= bytes_consumed()) {
- memcpy(section_buffer()->bytes() + section_buffer()->payload_offset(),
- buffer(), bytes_consumed());
+ if (section_buffer_->payload_length() >= bytes_consumed_) {
+ memcpy(section_buffer_->bytes() + section_buffer_->payload_offset(),
+ buffer(), bytes_consumed_);
} else {
return streaming->Error("Invalid code section length");
}
// {value} is the number of functions.
- if (value() > 0) {
- streaming->StartCodeSection(value());
+ if (value_ > 0) {
+ streaming->StartCodeSection(value_);
if (!streaming->ok()) return nullptr;
return base::make_unique<DecodeFunctionLength>(
- section_buffer(), section_buffer()->payload_offset() + bytes_consumed(),
- value());
+ section_buffer_, section_buffer_->payload_offset() + bytes_consumed_,
+ value_);
} else {
- if (section_buffer()->payload_length() != bytes_consumed()) {
+ if (section_buffer_->payload_length() != bytes_consumed_) {
return streaming->Error("not all code section bytes were consumed");
}
return base::make_unique<DecodeSectionID>(streaming->module_offset());
@@ -395,27 +376,27 @@ StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeFunctionLength::NextWithValue(
StreamingDecoder* streaming) {
- TRACE_STREAMING("DecodeFunctionLength(%zu)\n", value());
+ TRACE_STREAMING("DecodeFunctionLength(%zu)\n", value_);
// Copy the bytes we consumed into the section buffer.
- if (section_buffer_->length() >= buffer_offset_ + bytes_consumed()) {
+ if (section_buffer_->length() >= buffer_offset_ + bytes_consumed_) {
memcpy(section_buffer_->bytes() + buffer_offset_, buffer(),
- bytes_consumed());
+ bytes_consumed_);
} else {
return streaming->Error("Invalid code section length");
}
// {value} is the length of the function.
- if (value() == 0) {
+ if (value_ == 0) {
return streaming->Error("Invalid function length (0)");
- } else if (buffer_offset() + bytes_consumed() + value() >
- section_buffer()->length()) {
+ } else if (buffer_offset_ + bytes_consumed_ + value_ >
+ section_buffer_->length()) {
streaming->Error("not enough code section bytes");
return nullptr;
}
return base::make_unique<DecodeFunctionBody>(
- section_buffer(), buffer_offset() + bytes_consumed(), value(),
- num_remaining_functions(), streaming->module_offset());
+ section_buffer_, buffer_offset_ + bytes_consumed_, value_,
+ num_remaining_functions_, streaming->module_offset());
}
std::unique_ptr<StreamingDecoder::DecodingState>
@@ -423,15 +404,15 @@ StreamingDecoder::DecodeFunctionBody::Next(StreamingDecoder* streaming) {
TRACE_STREAMING("DecodeFunctionBody\n");
streaming->ProcessFunctionBody(
Vector<const uint8_t>(buffer(), static_cast<int>(size())),
- module_offset());
+ module_offset_);
if (!streaming->ok()) {
return nullptr;
}
- if (num_remaining_functions() != 0) {
+ if (num_remaining_functions_ != 0) {
return base::make_unique<DecodeFunctionLength>(
- section_buffer(), buffer_offset() + size(), num_remaining_functions());
+ section_buffer_, buffer_offset_ + size(), num_remaining_functions_);
} else {
- if (buffer_offset() + size() != section_buffer()->length()) {
+ if (buffer_offset_ + size() != section_buffer_->length()) {
return streaming->Error("not all code section bytes were used");
}
return base::make_unique<DecodeSectionID>(streaming->module_offset());
diff --git a/deps/v8/src/wasm/wasm-heap.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index e111ec55f5..8e46f33b01 100644
--- a/deps/v8/src/wasm/wasm-heap.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/wasm/wasm-heap.h"
+#include "src/wasm/wasm-code-manager.h"
+
+#include <iomanip>
#include "src/assembler-inl.h"
#include "src/base/atomic-utils.h"
@@ -39,6 +41,15 @@ void GenerateJumpTrampoline(MacroAssembler* masm, Address target) {
__ jmp(kScratchRegister);
}
#undef __
+#elif V8_TARGET_ARCH_S390X
+#define __ masm->
+constexpr bool kModuleCanAllocateMoreMemory = false;
+
+void GenerateJumpTrampoline(MacroAssembler* masm, Address target) {
+ __ mov(ip, Operand(bit_cast<intptr_t, Address>(target)));
+ __ b(ip);
+}
+#undef __
#else
const bool kModuleCanAllocateMoreMemory = true;
#endif
@@ -53,7 +64,7 @@ void PatchTrampolineAndStubCalls(
new_code->constant_pool(), RelocInfo::kCodeTargetMask);
!it.done(); it.next(), orig_it.next()) {
Address old_target = orig_it.rinfo()->target_address();
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X
auto found = reverse_lookup.find(old_target);
DCHECK(found != reverse_lookup.end());
Address new_target = found->second;
@@ -176,18 +187,72 @@ bool WasmCode::HasTrapHandlerIndex() const { return trap_handler_index_ >= 0; }
void WasmCode::ResetTrapHandlerIndex() { trap_handler_index_ = -1; }
-// TODO(mtrofin): rework the dependency on isolate and code in
-// Disassembler::Decode.
-void WasmCode::Disassemble(Isolate* isolate, const char* name,
+void WasmCode::Print(Isolate* isolate) const {
+ OFStream os(stdout);
+ Disassemble(nullptr, isolate, os);
+}
+
+void WasmCode::Disassemble(const char* name, Isolate* isolate,
std::ostream& os) const {
- os << name << std::endl;
+ if (name) os << "name: " << name << "\n";
+ if (index_.IsJust()) os << "index: " << index_.FromJust() << "\n";
+ os << "kind: " << GetWasmCodeKindAsString(kind_) << "\n";
+ os << "compiler: " << (is_liftoff() ? "Liftoff" : "TurboFan") << "\n";
+ size_t body_size = instructions().size();
+ os << "Body (size = " << body_size << ")\n";
+
+#ifdef ENABLE_DISASSEMBLER
+
+ size_t instruction_size =
+ std::min(constant_pool_offset_, safepoint_table_offset_);
+ os << "Instructions (size = " << instruction_size << ")\n";
+ // TODO(mtrofin): rework the dependency on isolate and code in
+ // Disassembler::Decode.
Disassembler::Decode(isolate, &os, instructions().start(),
- instructions().end(), nullptr);
+ instructions().start() + instruction_size, nullptr);
+ os << "\n";
+
+ Object* source_positions_or_undef =
+ owner_->compiled_module()->source_positions()->get(index());
+ if (!source_positions_or_undef->IsUndefined(isolate)) {
+ os << "Source positions:\n pc offset position\n";
+ for (SourcePositionTableIterator it(
+ ByteArray::cast(source_positions_or_undef));
+ !it.done(); it.Advance()) {
+ os << std::setw(10) << std::hex << it.code_offset() << std::dec
+ << std::setw(10) << it.source_position().ScriptOffset()
+ << (it.is_statement() ? " statement" : "") << "\n";
+ }
+ os << "\n";
+ }
+
+ os << "RelocInfo (size = " << reloc_size_ << ")\n";
+ for (RelocIterator it(instructions(), reloc_info(), constant_pool());
+ !it.done(); it.next()) {
+ it.rinfo()->Print(isolate, os);
+ }
+ os << "\n";
+#endif // ENABLE_DISASSEMBLER
}
-void WasmCode::Print(Isolate* isolate) const {
- OFStream os(stdout);
- Disassemble(isolate, "", os);
+const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
+ switch (kind) {
+ case WasmCode::kFunction:
+ return "wasm function";
+ case WasmCode::kWasmToWasmWrapper:
+ return "wasm-to-wasm";
+ case WasmCode::kWasmToJsWrapper:
+ return "wasm-to-js";
+ case WasmCode::kLazyStub:
+ return "lazy-compile";
+ case WasmCode::kInterpreterStub:
+ return "interpreter-entry";
+ case WasmCode::kCopiedStub:
+ return "copied stub";
+ case WasmCode::kTrampoline:
+ return "trampoline";
+ }
+ return "unknown kind";
}
WasmCode::~WasmCode() {
@@ -226,12 +291,16 @@ void NativeModule::ResizeCodeTableForTest(size_t last_index) {
code_table_.resize(new_size);
int grow_by = static_cast<int>(new_size) -
compiled_module()->source_positions()->length();
- compiled_module()->set_source_positions(
- isolate->factory()->CopyFixedArrayAndGrow(
- compiled_module()->source_positions(), grow_by, TENURED));
- compiled_module()->set_handler_table(
- isolate->factory()->CopyFixedArrayAndGrow(
- compiled_module()->handler_table(), grow_by, TENURED));
+ Handle<FixedArray> source_positions(compiled_module()->source_positions(),
+ isolate);
+ source_positions = isolate->factory()->CopyFixedArrayAndGrow(
+ source_positions, grow_by, TENURED);
+ compiled_module()->set_source_positions(*source_positions);
+ Handle<FixedArray> handler_table(compiled_module()->handler_table(),
+ isolate);
+ handler_table = isolate->factory()->CopyFixedArrayAndGrow(handler_table,
+ grow_by, TENURED);
+ compiled_module()->set_handler_table(*handler_table);
}
}
@@ -246,7 +315,7 @@ uint32_t NativeModule::FunctionCount() const {
WasmCode* NativeModule::AddOwnedCode(
Vector<const byte> orig_instructions,
- std::unique_ptr<const byte[]>&& reloc_info, size_t reloc_size,
+ std::unique_ptr<const byte[]> reloc_info, size_t reloc_size,
Maybe<uint32_t> index, WasmCode::Kind kind, size_t constant_pool_offset,
uint32_t stack_slots, size_t safepoint_table_offset,
std::shared_ptr<ProtectedInstructions> protected_instructions,
@@ -261,7 +330,7 @@ WasmCode* NativeModule::AddOwnedCode(
std::unique_ptr<WasmCode> code(new WasmCode(
{executable_buffer, orig_instructions.size()}, std::move(reloc_info),
reloc_size, this, index, kind, constant_pool_offset, stack_slots,
- safepoint_table_offset, protected_instructions, is_liftoff));
+ safepoint_table_offset, std::move(protected_instructions), is_liftoff));
WasmCode* ret = code.get();
// TODO(mtrofin): We allocate in increasing address order, and
@@ -270,6 +339,9 @@ WasmCode* NativeModule::AddOwnedCode(
auto insert_before = std::upper_bound(owned_code_.begin(), owned_code_.end(),
code, owned_code_comparer_);
owned_code_.insert(insert_before, std::move(code));
+ wasm_code_manager_->FlushICache(ret->instructions().start(),
+ ret->instructions().size());
+
return ret;
}
@@ -278,23 +350,23 @@ WasmCode* NativeModule::AddCodeCopy(Handle<Code> code, WasmCode::Kind kind,
WasmCode* ret = AddAnonymousCode(code, kind);
SetCodeTable(index, ret);
ret->index_ = Just(index);
- compiled_module()->ptr_to_source_positions()->set(
- static_cast<int>(index), code->source_position_table());
- compiled_module()->ptr_to_handler_table()->set(static_cast<int>(index),
- code->handler_table());
+ compiled_module()->source_positions()->set(static_cast<int>(index),
+ code->source_position_table());
+ compiled_module()->handler_table()->set(static_cast<int>(index),
+ code->handler_table());
return ret;
}
WasmCode* NativeModule::AddInterpreterWrapper(Handle<Code> code,
uint32_t index) {
- WasmCode* ret = AddAnonymousCode(code, WasmCode::InterpreterStub);
+ WasmCode* ret = AddAnonymousCode(code, WasmCode::kInterpreterStub);
ret->index_ = Just(index);
return ret;
}
WasmCode* NativeModule::SetLazyBuiltin(Handle<Code> code) {
DCHECK_NULL(lazy_builtin_);
- lazy_builtin_ = AddAnonymousCode(code, WasmCode::LazyStub);
+ lazy_builtin_ = AddAnonymousCode(code, WasmCode::kLazyStub);
for (uint32_t i = num_imported_functions(), e = FunctionCount(); i < e; ++i) {
SetCodeTable(i, lazy_builtin_);
@@ -325,8 +397,8 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
static_cast<size_t>(code->instruction_size())},
std::move(reloc_info), static_cast<size_t>(code->relocation_size()),
Nothing<uint32_t>(), kind, code->constant_pool_offset(),
- (code->is_turbofanned() ? code->stack_slots() : 0),
- (code->is_turbofanned() ? code->safepoint_table_offset() : 0), {});
+ (code->has_safepoint_info() ? code->stack_slots() : 0),
+ (code->has_safepoint_info() ? code->safepoint_table_offset() : 0), {});
if (ret == nullptr) return nullptr;
intptr_t delta = ret->instructions().start() - code->instruction_start();
int mask = RelocInfo::kApplyMask | RelocInfo::kCodeTargetMask |
@@ -356,7 +428,7 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
WasmCode* NativeModule::AddCode(
const CodeDesc& desc, uint32_t frame_slots, uint32_t index,
size_t safepoint_table_offset,
- std::shared_ptr<ProtectedInstructions> protected_instructions,
+ std::unique_ptr<ProtectedInstructions> protected_instructions,
bool is_liftoff) {
std::unique_ptr<byte[]> reloc_info;
if (desc.reloc_size) {
@@ -368,8 +440,9 @@ WasmCode* NativeModule::AddCode(
WasmCode* ret = AddOwnedCode(
{desc.buffer, static_cast<size_t>(desc.instr_size)},
std::move(reloc_info), static_cast<size_t>(desc.reloc_size), Just(index),
- WasmCode::Function, desc.instr_size - desc.constant_pool_size,
- frame_slots, safepoint_table_offset, protected_instructions, is_liftoff);
+ WasmCode::kFunction, desc.instr_size - desc.constant_pool_size,
+ frame_slots, safepoint_table_offset, std::move(protected_instructions),
+ is_liftoff);
if (ret == nullptr) return nullptr;
SetCodeTable(index, ret);
@@ -408,7 +481,7 @@ WasmCode* NativeModule::AddCode(
return ret;
}
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X
Address NativeModule::CreateTrampolineTo(Handle<Code> code) {
MacroAssembler masm(code->GetIsolate(), nullptr, 0, CodeObjectRequired::kNo);
Address dest = code->instruction_start();
@@ -417,7 +490,7 @@ Address NativeModule::CreateTrampolineTo(Handle<Code> code) {
masm.GetCode(nullptr, &code_desc);
WasmCode* wasm_code = AddOwnedCode(
{code_desc.buffer, static_cast<size_t>(code_desc.instr_size)}, nullptr, 0,
- Nothing<uint32_t>(), WasmCode::Trampoline, 0, 0, 0, {});
+ Nothing<uint32_t>(), WasmCode::kTrampoline, 0, 0, 0, {});
if (wasm_code == nullptr) return nullptr;
Address ret = wasm_code->instructions().start();
trampolines_.emplace(std::make_pair(dest, ret));
@@ -438,7 +511,7 @@ Address NativeModule::GetLocalAddressFor(Handle<Code> code) {
uint32_t key = code->stub_key();
auto copy = stubs_.find(key);
if (copy == stubs_.end()) {
- WasmCode* ret = AddAnonymousCode(code, WasmCode::CopiedStub);
+ WasmCode* ret = AddAnonymousCode(code, WasmCode::kCopiedStub);
copy = stubs_.emplace(std::make_pair(key, ret)).first;
}
return copy->second->instructions().start();
@@ -462,7 +535,7 @@ WasmCode* NativeModule::GetExportedWrapper(uint32_t index) {
}
WasmCode* NativeModule::AddExportedWrapper(Handle<Code> code, uint32_t index) {
- WasmCode* ret = AddAnonymousCode(code, WasmCode::WasmToWasmWrapper);
+ WasmCode* ret = AddAnonymousCode(code, WasmCode::kWasmToWasmWrapper);
ret->index_ = Just(index);
exported_wasm_to_wasm_wrappers_.insert(std::make_pair(index, ret));
return ret;
@@ -482,8 +555,7 @@ void NativeModule::Link(uint32_t index) {
for (RelocIterator it(code->instructions(), code->reloc_info(),
code->constant_pool(), mode_mask);
!it.done(); it.next()) {
- uint32_t index =
- *(reinterpret_cast<uint32_t*>(it.rinfo()->target_address_address()));
+ uint32_t index = GetWasmCalleeTag(it.rinfo());
const WasmCode* target = GetCode(index);
if (target == nullptr) continue;
Address target_addr = target->instructions().start();
@@ -519,8 +591,8 @@ Address NativeModule::AllocateForCode(size_t size) {
}
Address ret = mem.ranges().front().first;
Address end = ret + size;
- Address commit_start = RoundUp(ret, base::OS::AllocatePageSize());
- Address commit_end = RoundUp(end, base::OS::AllocatePageSize());
+ Address commit_start = RoundUp(ret, AllocatePageSize());
+ Address commit_end = RoundUp(end, AllocatePageSize());
// {commit_start} will be either ret or the start of the next page.
// {commit_end} will be the start of the page after the one in which
// the allocation ends.
@@ -542,7 +614,7 @@ Address NativeModule::AllocateForCode(size_t size) {
Address start =
std::max(commit_start, reinterpret_cast<Address>(it->address()));
size_t commit_size = static_cast<size_t>(commit_end - start);
- DCHECK(IsAligned(commit_size, base::OS::AllocatePageSize()));
+ DCHECK(IsAligned(commit_size, AllocatePageSize()));
if (!wasm_code_manager_->Commit(start, commit_size)) {
return nullptr;
}
@@ -551,7 +623,7 @@ Address NativeModule::AllocateForCode(size_t size) {
}
#else
size_t commit_size = static_cast<size_t>(commit_end - commit_start);
- DCHECK(IsAligned(commit_size, base::OS::AllocatePageSize()));
+ DCHECK(IsAligned(commit_size, AllocatePageSize()));
if (!wasm_code_manager_->Commit(commit_start, commit_size)) {
return nullptr;
}
@@ -651,9 +723,8 @@ WasmCodeManager::WasmCodeManager(v8::Isolate* isolate, size_t max_committed)
}
bool WasmCodeManager::Commit(Address start, size_t size) {
- DCHECK(
- IsAligned(reinterpret_cast<size_t>(start), base::OS::AllocatePageSize()));
- DCHECK(IsAligned(size, base::OS::AllocatePageSize()));
+ DCHECK(IsAligned(reinterpret_cast<size_t>(start), AllocatePageSize()));
+ DCHECK(IsAligned(size, AllocatePageSize()));
if (size > static_cast<size_t>(std::numeric_limits<intptr_t>::max())) {
return false;
}
@@ -663,9 +734,14 @@ bool WasmCodeManager::Commit(Address start, size_t size) {
remaining_uncommitted_.Increment(size);
return false;
}
- // TODO(v8:7105) Enable W^X instead of setting W|X permissions below.
- bool ret = base::OS::SetPermissions(
- start, size, base::OS::MemoryPermission::kReadWriteExecute);
+ PageAllocator::Permission permission = FLAG_wasm_write_protect_code_memory
+ ? PageAllocator::kReadWrite
+ : PageAllocator::kReadWriteExecute;
+
+ bool ret = SetPermissions(start, size, permission);
+ TRACE_HEAP("Setting rw permissions for %p:%p\n",
+ reinterpret_cast<void*>(start),
+ reinterpret_cast<void*>(start + size));
if (!ret) {
// Highly unlikely.
remaining_uncommitted_.Increment(size);
@@ -704,11 +780,11 @@ void WasmCodeManager::AssignRanges(void* start, void* end,
void WasmCodeManager::TryAllocate(size_t size, VirtualMemory* ret, void* hint) {
DCHECK_GT(size, 0);
- size = RoundUp(size, base::OS::AllocatePageSize());
- if (hint == nullptr) hint = base::OS::GetRandomMmapAddr();
+ size = RoundUp(size, AllocatePageSize());
+ if (hint == nullptr) hint = GetRandomMmapAddr();
- if (!AlignedAllocVirtualMemory(
- size, static_cast<size_t>(base::OS::AllocatePageSize()), hint, ret)) {
+ if (!AlignedAllocVirtualMemory(size, static_cast<size_t>(AllocatePageSize()),
+ hint, ret)) {
DCHECK(!ret->IsReserved());
}
TRACE_HEAP("VMem alloc: %p:%p (%zu)\n", ret->address(), ret->end(),
@@ -720,7 +796,7 @@ size_t WasmCodeManager::GetAllocationChunk(const WasmModule& module) {
// from something embedder-provided
if (kRequiresCodeRange) return kMaxWasmCodeMemory;
DCHECK(kModuleCanAllocateMoreMemory);
- size_t ret = base::OS::AllocatePageSize();
+ size_t ret = AllocatePageSize();
// a ballpark guesstimate on native inflation factor.
constexpr size_t kMultiplier = 4;
@@ -756,9 +832,56 @@ std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
return ret;
}
+ V8::FatalProcessOutOfMemory("WasmCodeManager::NewNativeModule");
return nullptr;
}
+bool NativeModule::SetExecutable(bool executable) {
+ if (is_executable_ == executable) return true;
+ TRACE_HEAP("Setting module %zu as executable: %d.\n", instance_id,
+ executable);
+ PageAllocator::Permission permission =
+ executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite;
+
+ if (FLAG_wasm_write_protect_code_memory) {
+#if V8_OS_WIN
+ // On windows, we need to switch permissions per separate virtual memory
+ // reservation. This is really just a problem when the NativeModule is
+ // growable (meaning can_request_more_memory_). That's 32-bit in production,
+ // or unittests.
+ // For now, in that case, we commit at reserved memory granularity.
+ // Technically, that may be a waste, because we may reserve more than we
+ // use. On 32-bit though, the scarce resource is the address space -
+ // committed or not.
+ if (can_request_more_memory_) {
+ for (auto& vmem : owned_memory_) {
+ if (!SetPermissions(vmem.address(), vmem.size(), permission)) {
+ return false;
+ }
+ TRACE_HEAP("Set %p:%p to executable:%d\n", vmem.address(), vmem.end(),
+ executable);
+ }
+ is_executable_ = executable;
+ return true;
+ }
+#endif
+ for (auto& range : allocated_memory_.ranges()) {
+ // allocated_memory_ is fine-grained, so we need to
+ // page-align it.
+ size_t range_size = RoundUp(
+ static_cast<size_t>(range.second - range.first), AllocatePageSize());
+ if (!SetPermissions(range.first, range_size, permission)) {
+ return false;
+ }
+ TRACE_HEAP("Set %p:%p to executable:%d\n",
+ reinterpret_cast<void*>(range.first),
+ reinterpret_cast<void*>(range.second), executable);
+ }
+ }
+ is_executable_ = executable;
+ return true;
+}
+
std::unique_ptr<NativeModule> NativeModule::Clone() {
std::unique_ptr<NativeModule> ret = wasm_code_manager_->NewNativeModule(
owned_memory_.front().size(), FunctionCount(), num_imported_functions(),
@@ -802,14 +925,14 @@ std::unique_ptr<NativeModule> NativeModule::Clone() {
for (uint32_t i = num_imported_functions(), e = FunctionCount(); i < e; ++i) {
const WasmCode* original_code = GetCode(i);
switch (original_code->kind()) {
- case WasmCode::LazyStub: {
+ case WasmCode::kLazyStub: {
if (original_code->IsAnonymous()) {
ret->SetCodeTable(i, ret->lazy_builtin());
} else {
if (!ret->CloneLazyBuiltinInto(i)) return nullptr;
}
} break;
- case WasmCode::Function: {
+ case WasmCode::kFunction: {
WasmCode* new_code = ret->CloneCode(original_code);
if (new_code == nullptr) return nullptr;
PatchTrampolineAndStubCalls(original_code, new_code, reverse_lookup);
@@ -835,7 +958,7 @@ void WasmCodeManager::FreeNativeModuleMemories(NativeModule* native_module) {
// which we currently indicate by having the isolate_ as null
if (isolate_ == nullptr) return;
size_t freed_mem = native_module->committed_memory_;
- DCHECK(IsAligned(freed_mem, base::OS::AllocatePageSize()));
+ DCHECK(IsAligned(freed_mem, AllocatePageSize()));
remaining_uncommitted_.Increment(freed_mem);
isolate_->AdjustAmountOfExternalAllocatedMemory(
-static_cast<int64_t>(freed_mem));
@@ -847,7 +970,11 @@ void WasmCodeManager::FreeNativeModuleMemories(NativeModule* native_module) {
// easily identify those places where we know we have the first
// instruction PC.
WasmCode* WasmCodeManager::GetCodeFromStartAddress(Address pc) const {
- return LookupCode(pc);
+ WasmCode* code = LookupCode(pc);
+ // This method can only be called for valid instruction start addresses.
+ DCHECK_NOT_NULL(code);
+ DCHECK_EQ(pc, code->instructions().start());
+ return code;
}
WasmCode* WasmCodeManager::LookupCode(Address pc) const {
@@ -880,6 +1007,50 @@ intptr_t WasmCodeManager::remaining_uncommitted() const {
return remaining_uncommitted_.Value();
}
+void WasmCodeManager::FlushICache(Address start, size_t size) {
+ Assembler::FlushICache(reinterpret_cast<internal::Isolate*>(isolate_), start,
+ size);
+}
+
+NativeModuleModificationScope::NativeModuleModificationScope(
+ NativeModule* native_module)
+ : native_module_(native_module) {
+ if (native_module_) {
+ bool success = native_module_->SetExecutable(false);
+ CHECK(success);
+ }
+}
+
+NativeModuleModificationScope::~NativeModuleModificationScope() {
+ if (native_module_) {
+ bool success = native_module_->SetExecutable(true);
+ CHECK(success);
+ }
+}
+
+// On Intel, call sites are encoded as a displacement. For linking
+// and for serialization/deserialization, we want to store/retrieve
+// a tag (the function index). On Intel, that means accessing the
+// raw displacement. Everywhere else, that simply means accessing
+// the target address.
+void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) {
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+ *(reinterpret_cast<uint32_t*>(rinfo->target_address_address())) = tag;
+#else
+ rinfo->set_target_address(nullptr, reinterpret_cast<Address>(tag),
+ SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+#endif
+}
+
+uint32_t GetWasmCalleeTag(RelocInfo* rinfo) {
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+ return *(reinterpret_cast<uint32_t*>(rinfo->target_address_address()));
+#else
+ return static_cast<uint32_t>(
+ reinterpret_cast<size_t>(rinfo->target_address()));
+#endif
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
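
A minimal sketch of the idea behind SetWasmCalleeTag/GetWasmCalleeTag above, assuming the x64/ia32 path: the 32-bit displacement slot of a call instruction is reused to hold the callee's function index. RelocInfo and target_address_address() are the real V8 accessors; the buffer and helper names below are hypothetical stand-ins for illustration only.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Write a tag (function index) into a 4-byte displacement slot.
    void WriteTag(uint8_t* displacement_slot, uint32_t tag) {
      std::memcpy(displacement_slot, &tag, sizeof(tag));
    }

    // Read the tag back out of the same slot.
    uint32_t ReadTag(const uint8_t* displacement_slot) {
      uint32_t tag;
      std::memcpy(&tag, displacement_slot, sizeof(tag));
      return tag;
    }

    int main() {
      uint8_t fake_call_site[4] = {0};  // stand-in for the rel32 displacement
      WriteTag(fake_call_site, 42);     // tag the call site with function index 42
      assert(ReadTag(fake_call_site) == 42);
    }
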
diff --git a/deps/v8/src/wasm/wasm-heap.h b/deps/v8/src/wasm/wasm-code-manager.h
index 9775f18b9b..3e2a0918fb 100644
--- a/deps/v8/src/wasm/wasm-heap.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -88,13 +88,13 @@ using ProtectedInstructions =
class V8_EXPORT_PRIVATE WasmCode final {
public:
enum Kind {
- Function,
- WasmToWasmWrapper,
- WasmToJsWrapper,
- LazyStub,
- InterpreterStub,
- CopiedStub,
- Trampoline
+ kFunction,
+ kWasmToWasmWrapper,
+ kWasmToJsWrapper,
+ kLazyStub,
+ kInterpreterStub,
+ kCopiedStub,
+ kTrampoline
};
Vector<byte> instructions() const { return instructions_; }
@@ -123,8 +123,8 @@ class V8_EXPORT_PRIVATE WasmCode final {
return *protected_instructions_.get();
}
- void Disassemble(Isolate* isolate, const char* name, std::ostream& os) const;
void Print(Isolate* isolate) const;
+ void Disassemble(const char* name, Isolate* isolate, std::ostream& os) const;
~WasmCode();
@@ -151,7 +151,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
constant_pool_offset_(constant_pool_offset),
stack_slots_(stack_slots),
safepoint_table_offset_(safepoint_table_offset),
- protected_instructions_(protected_instructions),
+ protected_instructions_(std::move(protected_instructions)),
is_liftoff_(is_liftoff) {}
WasmCode(const WasmCode&) = delete;
@@ -174,6 +174,9 @@ class V8_EXPORT_PRIVATE WasmCode final {
bool is_liftoff_;
};
+// Return a textual description of the kind.
+const char* GetWasmCodeKindAsString(WasmCode::Kind);
+
class WasmCodeManager;
// Note that we currently need to add code on the main thread, because we may
@@ -187,7 +190,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmCode* AddCode(const CodeDesc& desc, uint32_t frame_count, uint32_t index,
size_t safepoint_table_offset,
- std::shared_ptr<ProtectedInstructions>,
+ std::unique_ptr<ProtectedInstructions>,
bool is_liftoff = false);
// A way to copy over JS-allocated code. This is because we compile
@@ -228,6 +231,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
// this change.
WasmCode* CloneLazyBuiltinInto(uint32_t);
+ bool SetExecutable(bool executable);
+
// For cctests, where we build both WasmModule and the runtime objects
// on the fly, and bypass the instance builder pipeline.
void ResizeCodeTableForTest(size_t);
@@ -243,16 +248,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
std::vector<wasm::GlobalHandleAddress>& function_tables() {
return specialization_data_.function_tables;
}
- std::vector<wasm::GlobalHandleAddress>& signature_tables() {
- return specialization_data_.signature_tables;
- }
std::vector<wasm::GlobalHandleAddress>& empty_function_tables() {
return specialization_data_.empty_function_tables;
}
- std::vector<wasm::GlobalHandleAddress>& empty_signature_tables() {
- return specialization_data_.empty_signature_tables;
- }
uint32_t num_imported_functions() const { return num_imported_functions_; }
size_t num_function_tables() const {
@@ -292,7 +291,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
// code is obtained (CodeDesc vs, as a point in time, Code*), the kind,
// whether it has an index or is anonymous, etc.
WasmCode* AddOwnedCode(Vector<const byte> orig_instructions,
- std::unique_ptr<const byte[]>&& reloc_info,
+ std::unique_ptr<const byte[]> reloc_info,
size_t reloc_size, Maybe<uint32_t> index,
WasmCode::Kind kind, size_t constant_pool_offset,
uint32_t stack_slots, size_t safepoint_table_offset,
@@ -325,15 +324,14 @@ class V8_EXPORT_PRIVATE NativeModule final {
Handle<WasmCompiledModule> compiled_module_;
size_t committed_memory_ = 0;
bool can_request_more_memory_;
+ bool is_executable_ = false;
// Specialization data that needs to be serialized and cloned.
  // Keeping it grouped together because it makes cloning of all these
  // elements a one-line copy.
struct {
std::vector<wasm::GlobalHandleAddress> function_tables;
- std::vector<wasm::GlobalHandleAddress> signature_tables;
std::vector<wasm::GlobalHandleAddress> empty_function_tables;
- std::vector<wasm::GlobalHandleAddress> empty_signature_tables;
} specialization_data_;
};
@@ -358,6 +356,10 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
WasmCode* GetCodeFromStartAddress(Address pc) const;
intptr_t remaining_uncommitted() const;
+ // TODO(mtrofin): replace this API with an alternative that is Isolate-
+ // independent.
+ void FlushICache(Address start, size_t size);
+
private:
friend class NativeModule;
@@ -380,9 +382,37 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// worth requesting a GC on memory pressure.
size_t active_ = 0;
base::AtomicNumber<intptr_t> remaining_uncommitted_;
+
+ // TODO(mtrofin): remove the dependency on isolate.
v8::Isolate* isolate_;
};
+// Within the scope, the native_module is writable and not executable.
+// At the scope's destruction, the native_module is executable and not
+// writable. Both of these states hold regardless of the native_module's
+// state when entering the scope.
+// We currently switch permissions for the entire module's memory (W^X):
+//  - for AOT, that's as efficient as it can be.
+//  - for lazy compilation, we don't have a heuristic for which functions may
+//    need patching, and even if we did, the resulting set of pages may be
+//    fragmented. For now, we try to keep the number of syscalls low.
+//  - a similar argument applies at debug time.
+class NativeModuleModificationScope final {
+ public:
+ explicit NativeModuleModificationScope(NativeModule* native_module);
+ ~NativeModuleModificationScope();
+
+ private:
+ NativeModule* native_module_;
+};
+
+// Utilities specific to wasm code generation. We embed a tag - the index of
+// the called function - at call sites, both when the code is initially
+// created and when it is serialized or deserialized. These APIs provide
+// accessors for that tag. The implementation has platform-specific nuances.
+void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag);
+uint32_t GetWasmCalleeTag(RelocInfo* rinfo);
+
} // namespace wasm
} // namespace internal
} // namespace v8
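
A hypothetical usage sketch of the NativeModuleModificationScope declared above (the call site and PatchCallSites are illustrative, not actual V8 code; the V8 wasm headers and namespaces are assumed): while the scope is alive the module's code pages are read-write, and they flip back to read-execute when the scope is destroyed.

    void PatchModule(v8::internal::wasm::NativeModule* native_module) {
      v8::internal::wasm::NativeModuleModificationScope scope(native_module);
      // Code pages are now writable and not executable; mutate them here,
      // e.g. PatchCallSites(native_module);  (hypothetical helper)
    }  // Scope destruction makes the pages executable and not writable again.
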
diff --git a/deps/v8/src/wasm/wasm-code-specialization.cc b/deps/v8/src/wasm/wasm-code-specialization.cc
index 40a9dac9a3..416d1d600a 100644
--- a/deps/v8/src/wasm/wasm-code-specialization.cc
+++ b/deps/v8/src/wasm/wasm-code-specialization.cc
@@ -9,9 +9,9 @@
#include "src/objects-inl.h"
#include "src/source-position-table.h"
#include "src/wasm/decoder.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
-#include "src/wasm/wasm-opcodes.h"
namespace v8 {
namespace internal {
@@ -50,18 +50,19 @@ class PatchDirectCallsHelper {
decoder(nullptr, nullptr) {
uint32_t func_index = code->index();
WasmCompiledModule* comp_mod = instance->compiled_module();
- func_bytes = comp_mod->module_bytes()->GetChars() +
- comp_mod->module()->functions[func_index].code.offset();
+ func_bytes =
+ comp_mod->shared()->module_bytes()->GetChars() +
+ comp_mod->shared()->module()->functions[func_index].code.offset();
}
PatchDirectCallsHelper(WasmInstanceObject* instance, Code* code)
: source_pos_it(code->SourcePositionTable()), decoder(nullptr, nullptr) {
FixedArray* deopt_data = code->deoptimization_data();
DCHECK_EQ(2, deopt_data->length());
- WasmCompiledModule* comp_mod = instance->compiled_module();
+ WasmSharedModuleData* shared = instance->compiled_module()->shared();
int func_index = Smi::ToInt(deopt_data->get(1));
- func_bytes = comp_mod->module_bytes()->GetChars() +
- comp_mod->module()->functions[func_index].code.offset();
+ func_bytes = shared->module_bytes()->GetChars() +
+ shared->module()->functions[func_index].code.offset();
}
SourcePositionTableIterator source_pos_it;
@@ -115,12 +116,12 @@ bool CodeSpecialization::ApplyToWholeInstance(
DisallowHeapAllocation no_gc;
WasmCompiledModule* compiled_module = instance->compiled_module();
NativeModule* native_module = compiled_module->GetNativeModule();
- FixedArray* code_table = compiled_module->ptr_to_code_table();
- WasmModule* module = compiled_module->module();
- std::vector<WasmFunction>* wasm_functions =
- &compiled_module->module()->functions;
+ FixedArray* code_table = compiled_module->code_table();
+ WasmSharedModuleData* shared = compiled_module->shared();
+ WasmModule* module = shared->module();
+ std::vector<WasmFunction>* wasm_functions = &shared->module()->functions;
DCHECK_EQ(compiled_module->export_wrappers()->length(),
- compiled_module->module()->num_exported_functions);
+ shared->module()->num_exported_functions);
bool changed = false;
int func_index = module->num_imported_functions;
@@ -131,7 +132,7 @@ bool CodeSpecialization::ApplyToWholeInstance(
WasmCodeWrapper wrapper;
if (FLAG_wasm_jit_to_native) {
const WasmCode* wasm_function = native_module->GetCode(func_index);
- if (wasm_function->kind() != WasmCode::Function) {
+ if (wasm_function->kind() != WasmCode::kFunction) {
continue;
}
wrapper = WasmCodeWrapper(wasm_function);
@@ -206,7 +207,7 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
if (code.IsCodeObject()) {
DCHECK_EQ(Code::WASM_FUNCTION, code.GetCode()->kind());
} else {
- DCHECK_EQ(wasm::WasmCode::Function, code.GetWasmCode()->kind());
+ DCHECK_EQ(wasm::WasmCode::kFunction, code.GetWasmCode()->kind());
}
bool patch_table_size = old_function_table_size || new_function_table_size;
@@ -261,8 +262,7 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
patch_direct_calls_helper->decoder,
patch_direct_calls_helper->func_bytes + byte_pos);
FixedArray* code_table =
- relocate_direct_calls_instance->compiled_module()
- ->ptr_to_code_table();
+ relocate_direct_calls_instance->compiled_module()->code_table();
Code* new_code = Code::cast(code_table->get(called_func_index));
it.rinfo()->set_target_address(new_code->GetIsolate(),
new_code->instruction_start(),
diff --git a/deps/v8/src/wasm/wasm-code-wrapper.cc b/deps/v8/src/wasm/wasm-code-wrapper.cc
index 28a96d16bf..9256391543 100644
--- a/deps/v8/src/wasm/wasm-code-wrapper.cc
+++ b/deps/v8/src/wasm/wasm-code-wrapper.cc
@@ -4,8 +4,10 @@
#include "src/wasm/wasm-code-wrapper.h"
-#include "src/objects.h"
+#include "src/objects-inl.h"
#include "src/objects/code.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -34,5 +36,34 @@ const wasm::WasmCode* WasmCodeWrapper::GetWasmCode() const {
bool WasmCodeWrapper::IsCodeObject() const { return !FLAG_wasm_jit_to_native; }
+#ifdef ENABLE_DISASSEMBLER
+void WasmCodeWrapper::Disassemble(const char* name, Isolate* isolate,
+ std::ostream& os) const {
+ if (IsCodeObject()) {
+ GetCode()->Disassemble(name, os);
+ } else {
+ GetWasmCode()->Disassemble(name, isolate, os);
+ }
+}
+#endif
+
+bool WasmCodeWrapper::is_liftoff() const {
+ return IsCodeObject() ? !GetCode()->is_turbofanned()
+ : GetWasmCode()->is_liftoff();
+}
+
+Vector<uint8_t> WasmCodeWrapper::instructions() const {
+ if (!IsCodeObject()) return GetWasmCode()->instructions();
+ Handle<Code> code = GetCode();
+ return {code->instruction_start(),
+ static_cast<size_t>(code->instruction_size())};
+}
+
+Handle<WasmInstanceObject> WasmCodeWrapper::wasm_instance() const {
+ return IsCodeObject()
+ ? handle(WasmInstanceObject::GetOwningInstanceGC(*GetCode()))
+ : handle(WasmInstanceObject::GetOwningInstance(GetWasmCode()));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-code-wrapper.h b/deps/v8/src/wasm/wasm-code-wrapper.h
index f80aee8056..7d978152f1 100644
--- a/deps/v8/src/wasm/wasm-code-wrapper.h
+++ b/deps/v8/src/wasm/wasm-code-wrapper.h
@@ -13,6 +13,7 @@ class WasmCode;
} // namespace wasm
class Code;
+class WasmInstanceObject;
// TODO(mtrofin): remove once we remove FLAG_wasm_jit_to_native
class WasmCodeWrapper {
@@ -25,6 +26,15 @@ class WasmCodeWrapper {
const wasm::WasmCode* GetWasmCode() const;
bool is_null() const { return code_ptr_.wasm_code_ == nullptr; }
bool IsCodeObject() const;
+ bool is_liftoff() const;
+
+ Vector<uint8_t> instructions() const;
+
+ Handle<WasmInstanceObject> wasm_instance() const;
+
+#ifdef ENABLE_DISASSEMBLER
+ void Disassemble(const char* name, Isolate* isolate, std::ostream& os) const;
+#endif
private:
union {
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
new file mode 100644
index 0000000000..5e7ce1e4f5
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -0,0 +1,83 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_CONSTANTS_H_
+#define V8_WASM_CONSTANTS_H_
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Binary encoding of the module header.
+constexpr uint32_t kWasmMagic = 0x6d736100;
+constexpr uint32_t kWasmVersion = 0x01;
+
+// Binary encoding of local types.
+enum ValueTypeCode : uint8_t {
+ kLocalVoid = 0x40,
+ kLocalI32 = 0x7f,
+ kLocalI64 = 0x7e,
+ kLocalF32 = 0x7d,
+ kLocalF64 = 0x7c,
+ kLocalS128 = 0x7b
+};
+// Binary encoding of other types.
+constexpr uint8_t kWasmFunctionTypeCode = 0x60;
+constexpr uint8_t kWasmAnyFunctionTypeCode = 0x70;
+
+// Binary encoding of import/export kinds.
+enum ImportExportKindCode : uint8_t {
+ kExternalFunction = 0,
+ kExternalTable = 1,
+ kExternalMemory = 2,
+ kExternalGlobal = 3
+};
+
+// Binary encoding of maximum and shared flags for memories.
+enum MaximumFlag : uint8_t { kNoMaximumFlag = 0, kHasMaximumFlag = 1 };
+
+enum MemoryFlags : uint8_t {
+ kNoMaximum = 0,
+ kMaximum = 1,
+ kSharedNoMaximum = 2,
+ kSharedAndMaximum = 3
+};
+
+// Binary encoding of sections identifiers.
+enum SectionCode : int8_t {
+ kUnknownSectionCode = 0, // code for unknown sections
+ kTypeSectionCode = 1, // Function signature declarations
+ kImportSectionCode = 2, // Import declarations
+ kFunctionSectionCode = 3, // Function declarations
+ kTableSectionCode = 4, // Indirect function table and other tables
+ kMemorySectionCode = 5, // Memory attributes
+ kGlobalSectionCode = 6, // Global declarations
+ kExportSectionCode = 7, // Exports
+ kStartSectionCode = 8, // Start function declaration
+ kElementSectionCode = 9, // Elements section
+ kCodeSectionCode = 10, // Function code
+ kDataSectionCode = 11, // Data segments
+ kNameSectionCode = 12, // Name section (encoded as a string)
+ kExceptionSectionCode = 13, // Exception section
+
+ // Helper values
+ kFirstSectionInModule = kTypeSectionCode,
+ kLastKnownModuleSection = kExceptionSectionCode,
+};
+
+// Binary encoding of name section kinds.
+enum NameSectionKindCode : uint8_t { kModule = 0, kFunction = 1, kLocal = 2 };
+
+constexpr uint32_t kWasmPageSize = 0x10000;
+constexpr int kInvalidExceptionTag = -1;
+
+// TODO(wasm): Wrap WasmCodePosition in a struct.
+using WasmCodePosition = int;
+constexpr WasmCodePosition kNoCodePosition = -1;
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_CONSTANTS_H_
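
A minimal sketch, assuming a little-endian host, of checking a module header against the kWasmMagic and kWasmVersion constants defined above. ReadLittleEndianU32 is a hypothetical helper, not part of the V8 headers; V8's own decoder performs this check with proper bounds and endianness handling.

    #include <cstdint>
    #include <cstring>

    uint32_t ReadLittleEndianU32(const uint8_t* p) {
      uint32_t v;
      std::memcpy(&v, p, sizeof(v));  // assumes a little-endian host
      return v;
    }

    bool HasValidWasmHeader(const uint8_t* bytes, size_t length) {
      if (length < 8) return false;
      return ReadLittleEndianU32(bytes) == 0x6d736100 &&  // kWasmMagic ("\0asm")
             ReadLittleEndianU32(bytes + 4) == 0x01;      // kWasmVersion
    }
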
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 49ca995f5d..87995df4e6 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -6,6 +6,7 @@
#include "src/assembler-inl.h"
#include "src/assert-scope.h"
+#include "src/base/optional.h"
#include "src/compiler/wasm-compiler.h"
#include "src/debug/debug-scopes.h"
#include "src/debug/debug.h"
@@ -14,6 +15,7 @@
#include "src/identity-map.h"
#include "src/isolate.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
@@ -68,10 +70,9 @@ MaybeHandle<String> GetLocalName(Isolate* isolate,
DCHECK_LE(0, func_index);
DCHECK_LE(0, local_index);
if (!debug_info->has_locals_names()) {
- Handle<WasmCompiledModule> compiled_module(
- debug_info->wasm_instance()->compiled_module(), isolate);
- Handle<FixedArray> locals_names =
- wasm::DecodeLocalNames(isolate, compiled_module);
+ Handle<WasmSharedModuleData> shared(
+ debug_info->wasm_instance()->compiled_module()->shared(), isolate);
+ Handle<FixedArray> locals_names = wasm::DecodeLocalNames(isolate, shared);
debug_info->set_locals_names(*locals_names);
}
@@ -131,15 +132,18 @@ class InterpreterHandle {
static Vector<const byte> GetBytes(WasmDebugInfo* debug_info) {
// Return raw pointer into heap. The WasmInterpreter will make its own copy
// of this data anyway, and there is no heap allocation in-between.
- SeqOneByteString* bytes_str =
- debug_info->wasm_instance()->compiled_module()->module_bytes();
+ SeqOneByteString* bytes_str = debug_info->wasm_instance()
+ ->compiled_module()
+ ->shared()
+ ->module_bytes();
return {bytes_str->GetChars(), static_cast<size_t>(bytes_str->length())};
}
public:
InterpreterHandle(Isolate* isolate, WasmDebugInfo* debug_info)
: isolate_(isolate),
- module_(debug_info->wasm_instance()->compiled_module()->module()),
+ module_(
+ debug_info->wasm_instance()->compiled_module()->shared()->module()),
interpreter_(isolate, module_, GetBytes(debug_info),
debug_info->wasm_instance()->wasm_context()->get()) {}
@@ -305,11 +309,12 @@ class InterpreterHandle {
// Check whether we hit a breakpoint.
if (isolate_->debug()->break_points_active()) {
- Handle<WasmCompiledModule> compiled_module(
- GetInstanceObject()->compiled_module(), isolate_);
- int position = GetTopPosition(compiled_module);
+ Handle<WasmSharedModuleData> shared(
+ GetInstanceObject()->compiled_module()->shared(), isolate_);
+ int position = GetTopPosition(shared);
Handle<FixedArray> breakpoints;
- if (compiled_module->CheckBreakPoints(position).ToHandle(&breakpoints)) {
+ if (WasmSharedModuleData::CheckBreakPoints(isolate_, shared, position)
+ .ToHandle(&breakpoints)) {
// We hit one or several breakpoints. Clear stepping, notify the
// listeners and return.
ClearStepping();
@@ -341,13 +346,13 @@ class InterpreterHandle {
isolate_->debug()->OnDebugBreak(isolate_->factory()->empty_fixed_array());
}
- int GetTopPosition(Handle<WasmCompiledModule> compiled_module) {
+ int GetTopPosition(Handle<WasmSharedModuleData> shared) {
DCHECK_EQ(1, interpreter()->GetThreadCount());
WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
DCHECK_LT(0, thread->GetFrameCount());
auto frame = thread->GetFrame(thread->GetFrameCount() - 1);
- return compiled_module->GetFunctionOffset(frame->function()->func_index) +
+ return shared->GetFunctionOffset(frame->function()->func_index) +
frame->pc();
}
@@ -368,8 +373,8 @@ class InterpreterHandle {
return stack;
}
- std::unique_ptr<wasm::InterpretedFrame> GetInterpretedFrame(
- Address frame_pointer, int idx) {
+ WasmInterpreter::FramePtr GetInterpretedFrame(Address frame_pointer,
+ int idx) {
DCHECK_EQ(1, interpreter()->GetThreadCount());
WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
@@ -557,7 +562,7 @@ wasm::InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo* debug_info) {
int GetNumFunctions(WasmInstanceObject* instance) {
size_t num_functions =
- instance->compiled_module()->module()->functions.size();
+ instance->compiled_module()->shared()->module()->functions.size();
DCHECK_GE(kMaxInt, num_functions);
return static_cast<int>(num_functions);
}
@@ -622,7 +627,7 @@ void RedirectCallsitesInInstanceGC(Isolate* isolate,
CodeRelocationMapGC& map) {
DisallowHeapAllocation no_gc;
// Redirect all calls in wasm functions.
- FixedArray* code_table = instance->compiled_module()->ptr_to_code_table();
+ FixedArray* code_table = instance->compiled_module()->code_table();
for (int i = 0, e = GetNumFunctions(instance); i < e; ++i) {
RedirectCallsitesInCodeGC(Code::cast(code_table->get(i)), map);
}
@@ -630,7 +635,7 @@ void RedirectCallsitesInInstanceGC(Isolate* isolate,
// Redirect all calls in exported functions.
FixedArray* weak_exported_functions =
- instance->compiled_module()->ptr_to_weak_exported_functions();
+ instance->compiled_module()->weak_exported_functions();
for (int i = 0, e = weak_exported_functions->length(); i != e; ++i) {
WeakCell* weak_function = WeakCell::cast(weak_exported_functions->get(i));
if (weak_function->cleared()) continue;
@@ -652,7 +657,7 @@ void RedirectCallsitesInInstance(Isolate* isolate, WasmInstanceObject* instance,
// Redirect all calls in exported functions.
FixedArray* weak_exported_functions =
- instance->compiled_module()->ptr_to_weak_exported_functions();
+ instance->compiled_module()->weak_exported_functions();
for (int i = 0, e = weak_exported_functions->length(); i != e; ++i) {
WeakCell* weak_function = WeakCell::cast(weak_exported_functions->get(i));
if (weak_function->cleared()) continue;
@@ -723,22 +728,25 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
wasm::NativeModule* native_module =
instance->compiled_module()->GetNativeModule();
+ wasm::WasmModule* module = instance->module();
CodeRelocationMap code_to_relocate;
- Handle<FixedArray> code_table = instance->compiled_module()->code_table();
+ Handle<FixedArray> code_table(instance->compiled_module()->code_table(),
+ isolate);
CodeRelocationMapGC code_to_relocate_gc(isolate->heap());
- // TODO(6792): No longer needed once WebAssembly code is off heap.
+ // We may modify JS wrappers as well as wasm functions, hence the two
+ // modification scopes.
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ wasm::NativeModuleModificationScope native_module_modification_scope(
+ native_module);
+
for (int func_index : func_indexes) {
DCHECK_LE(0, func_index);
- DCHECK_GT(debug_info->wasm_instance()->module()->functions.size(),
- func_index);
+ DCHECK_GT(module->functions.size(), func_index);
if (!interpreted_functions->get(func_index)->IsUndefined(isolate)) continue;
Handle<Code> new_code = compiler::CompileWasmInterpreterEntry(
- isolate, func_index,
- instance->compiled_module()->module()->functions[func_index].sig,
- instance);
+ isolate, func_index, module->functions[func_index].sig, instance);
if (FLAG_wasm_jit_to_native) {
const wasm::WasmCode* wasm_new_code =
native_module->AddInterpreterWrapper(new_code, func_index);
@@ -782,7 +790,7 @@ std::vector<std::pair<uint32_t, int>> WasmDebugInfo::GetInterpretedStack(
return GetInterpreterHandle(this)->GetInterpretedStack(frame_pointer);
}
-std::unique_ptr<wasm::InterpretedFrame> WasmDebugInfo::GetInterpretedFrame(
+wasm::WasmInterpreter::FramePtr WasmDebugInfo::GetInterpretedFrame(
Address frame_pointer, int idx) {
return GetInterpreterHandle(this)->GetInterpretedFrame(frame_pointer, idx);
}
@@ -858,7 +866,7 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
name, new_entry_code, isolate->sloppy_function_map());
Handle<JSFunction> new_entry = isolate->factory()->NewFunction(args);
new_entry->set_context(
- *debug_info->wasm_instance()->compiled_module()->native_context());
+ debug_info->wasm_instance()->compiled_module()->native_context());
new_entry->set_shared(*shared);
entries->set(index, *new_entry);
}
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
new file mode 100644
index 0000000000..4c84b70dbd
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-engine.h"
+#include "src/objects-inl.h"
+#include "src/wasm/module-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+bool WasmEngine::SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes) {
+ // TODO(titzer): remove dependency on the isolate.
+ if (bytes.start() == nullptr || bytes.length() == 0) return false;
+ ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
+ bytes.end(), true, kWasmOrigin);
+ return result.ok();
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
new file mode 100644
index 0000000000..bf06b47ed7
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -0,0 +1,46 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef WASM_ENGINE_H_
+#define WASM_ENGINE_H_
+
+#include <memory>
+
+#include "src/wasm/compilation-manager.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-memory.h"
+
+namespace v8 {
+namespace internal {
+
+namespace wasm {
+
+// The central data structure that represents an engine instance capable of
+// loading, instantiating, and executing WASM code.
+class V8_EXPORT_PRIVATE WasmEngine {
+ public:
+ explicit WasmEngine(std::unique_ptr<WasmCodeManager> code_manager)
+ : code_manager_(std::move(code_manager)) {}
+
+ bool SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes);
+
+ CompilationManager* compilation_manager() { return &compilation_manager_; }
+
+ WasmCodeManager* code_manager() const { return code_manager_.get(); }
+
+ WasmAllocationTracker* allocation_tracker() { return &allocation_tracker_; }
+
+ private:
+ CompilationManager compilation_manager_;
+ std::unique_ptr<WasmCodeManager> code_manager_;
+ WasmAllocationTracker allocation_tracker_;
+
+ DISALLOW_COPY_AND_ASSIGN(WasmEngine);
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 238785ca3c..0a9d1401e3 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -55,7 +55,7 @@ void uint64_to_float32_wrapper(uint64_t* input, float* output) {
// achieve proper rounding in all cases we have to adjust the high_word
// with a "rounding bit" sometimes. The rounding bit is stored in the LSB of
// the high_word if the low_word may affect the rounding of the high_word.
- uint32_t low_word = static_cast<uint32_t>(*input & 0xffffffff);
+ uint32_t low_word = static_cast<uint32_t>(*input & 0xFFFFFFFF);
uint32_t high_word = static_cast<uint32_t>(*input >> 32);
float shift = static_cast<float>(1ull << 32);
@@ -65,7 +65,7 @@ void uint64_to_float32_wrapper(uint64_t* input, float* output) {
shift = static_cast<float>(1ull << 31);
}
- if ((high_word & 0xfe000000) && low_word) {
+ if ((high_word & 0xFE000000) && low_word) {
// Set the rounding bit.
high_word |= 1;
}
@@ -91,7 +91,7 @@ void uint64_to_float64_wrapper(uint64_t* input, double* output) {
// static_cast<double>(uint64_t) to achieve round-to-nearest-ties-even
// semantics. The idea is to calculate
// static_cast<double>(high_word) * 2^32 + static_cast<double>(low_word).
- uint32_t low_word = static_cast<uint32_t>(*input & 0xffffffff);
+ uint32_t low_word = static_cast<uint32_t>(*input & 0xFFFFFFFF);
uint32_t high_word = static_cast<uint32_t>(*input >> 32);
double shift = static_cast<double>(1ull << 32);
@@ -201,21 +201,29 @@ int32_t uint64_mod_wrapper(uint64_t* dst, uint64_t* src) {
}
uint32_t word32_ctz_wrapper(uint32_t* input) {
- return static_cast<uint32_t>(base::bits::CountTrailingZeros(*input));
+ return base::bits::CountTrailingZeros(*input);
}
uint32_t word64_ctz_wrapper(uint64_t* input) {
- return static_cast<uint32_t>(
- base::bits::CountTrailingZeros(ReadUnalignedValue<uint64_t>(input)));
+ return base::bits::CountTrailingZeros(ReadUnalignedValue<uint64_t>(input));
}
uint32_t word32_popcnt_wrapper(uint32_t* input) {
- return static_cast<uint32_t>(base::bits::CountPopulation(*input));
+ return base::bits::CountPopulation(*input);
}
uint32_t word64_popcnt_wrapper(uint64_t* input) {
- return static_cast<uint32_t>(
- base::bits::CountPopulation(ReadUnalignedValue<uint64_t>(input)));
+ return base::bits::CountPopulation(ReadUnalignedValue<uint64_t>(input));
+}
+
+uint32_t word32_rol_wrapper(uint32_t* input_p, uint32_t* shift_p) {
+ uint32_t shift = (*shift_p & 31);
+ return (*input_p << shift) | (*input_p >> (32 - shift));
+}
+
+uint32_t word32_ror_wrapper(uint32_t* input_p, uint32_t* shift_p) {
+ uint32_t shift = (*shift_p & 31);
+ return (*input_p >> shift) | (*input_p << (32 - shift));
}
void float64_pow_wrapper(double* param0, double* param1) {
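
The new word32_rol_wrapper/word32_ror_wrapper above implement 32-bit rotates with the shift masked to the low five bits. The same formula as a small standalone check (the names below are illustrative, not V8 code; a zero shift is guarded explicitly to avoid an out-of-range shift):

    #include <cassert>
    #include <cstdint>

    uint32_t RotateLeft32(uint32_t input, uint32_t shift) {
      shift &= 31;
      if (shift == 0) return input;  // avoid shifting by 32
      return (input << shift) | (input >> (32 - shift));
    }

    uint32_t RotateRight32(uint32_t input, uint32_t shift) {
      shift &= 31;
      if (shift == 0) return input;  // avoid shifting by 32
      return (input >> shift) | (input << (32 - shift));
    }

    int main() {
      assert(RotateLeft32(0x80000001u, 1) == 0x00000003u);
      assert(RotateRight32(0x00000003u, 1) == 0x80000001u);
    }
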
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
index e4e88de0db..dea620338a 100644
--- a/deps/v8/src/wasm/wasm-external-refs.h
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -59,6 +59,10 @@ uint32_t word32_popcnt_wrapper(uint32_t* input);
uint32_t word64_popcnt_wrapper(uint64_t* input);
+uint32_t word32_rol_wrapper(uint32_t* input_p, uint32_t* shift_p);
+
+uint32_t word32_ror_wrapper(uint32_t* input_p, uint32_t* shift_p);
+
void float64_pow_wrapper(double* param0, double* param1);
void set_thread_in_wasm_flag();
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 80d56a05f8..2f8fb0bf4a 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -19,6 +19,7 @@
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/memory-tracing.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-external-refs.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
@@ -31,14 +32,10 @@ namespace v8 {
namespace internal {
namespace wasm {
-#if DEBUG
#define TRACE(...) \
do { \
if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \
} while (false)
-#else
-#define TRACE(...)
-#endif
#define FOREACH_INTERNAL_OPCODE(V) V(Breakpoint, 0xFF)
@@ -129,6 +126,12 @@ namespace wasm {
V(F32CopySign, Float32) \
V(F64CopySign, Float64)
+#define FOREACH_I32CONV_FLOATOP(V) \
+ V(I32SConvertF32, int32_t, float) \
+ V(I32SConvertF64, int32_t, double) \
+ V(I32UConvertF32, uint32_t, float) \
+ V(I32UConvertF64, uint32_t, double)
+
#define FOREACH_OTHER_UNOP(V) \
V(I32Clz, uint32_t) \
V(I32Ctz, uint32_t) \
@@ -150,10 +153,6 @@ namespace wasm {
V(F64Floor, double) \
V(F64Trunc, double) \
V(F64NearestInt, double) \
- V(I32SConvertF32, float) \
- V(I32SConvertF64, double) \
- V(I32UConvertF32, float) \
- V(I32UConvertF64, double) \
V(I32ConvertI64, int64_t) \
V(I64SConvertF32, float) \
V(I64SConvertF64, double) \
@@ -223,15 +222,15 @@ inline uint32_t ExecuteI32RemU(uint32_t a, uint32_t b, TrapReason* trap) {
}
inline uint32_t ExecuteI32Shl(uint32_t a, uint32_t b, TrapReason* trap) {
- return a << (b & 0x1f);
+ return a << (b & 0x1F);
}
inline uint32_t ExecuteI32ShrU(uint32_t a, uint32_t b, TrapReason* trap) {
- return a >> (b & 0x1f);
+ return a >> (b & 0x1F);
}
inline int32_t ExecuteI32ShrS(int32_t a, int32_t b, TrapReason* trap) {
- return a >> (b & 0x1f);
+ return a >> (b & 0x1F);
}
inline int64_t ExecuteI64DivS(int64_t a, int64_t b, TrapReason* trap) {
@@ -272,34 +271,34 @@ inline uint64_t ExecuteI64RemU(uint64_t a, uint64_t b, TrapReason* trap) {
}
inline uint64_t ExecuteI64Shl(uint64_t a, uint64_t b, TrapReason* trap) {
- return a << (b & 0x3f);
+ return a << (b & 0x3F);
}
inline uint64_t ExecuteI64ShrU(uint64_t a, uint64_t b, TrapReason* trap) {
- return a >> (b & 0x3f);
+ return a >> (b & 0x3F);
}
inline int64_t ExecuteI64ShrS(int64_t a, int64_t b, TrapReason* trap) {
- return a >> (b & 0x3f);
+ return a >> (b & 0x3F);
}
inline uint32_t ExecuteI32Ror(uint32_t a, uint32_t b, TrapReason* trap) {
- uint32_t shift = (b & 0x1f);
+ uint32_t shift = (b & 0x1F);
return (a >> shift) | (a << (32 - shift));
}
inline uint32_t ExecuteI32Rol(uint32_t a, uint32_t b, TrapReason* trap) {
- uint32_t shift = (b & 0x1f);
+ uint32_t shift = (b & 0x1F);
return (a << shift) | (a >> (32 - shift));
}
inline uint64_t ExecuteI64Ror(uint64_t a, uint64_t b, TrapReason* trap) {
- uint32_t shift = (b & 0x3f);
+ uint32_t shift = (b & 0x3F);
return (a >> shift) | (a << (64 - shift));
}
inline uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) {
- uint32_t shift = (b & 0x3f);
+ uint32_t shift = (b & 0x3F);
return (a << shift) | (a >> (64 - shift));
}
@@ -444,59 +443,26 @@ inline double ExecuteF64NearestInt(double a, TrapReason* trap) {
inline double ExecuteF64Sqrt(double a, TrapReason* trap) { return sqrt(a); }
-int32_t ExecuteI32SConvertF32(float a, TrapReason* trap) {
- // The upper bound is (INT32_MAX + 1), which is the lowest float-representable
- // number above INT32_MAX which cannot be represented as int32.
- float upper_bound = 2147483648.0f;
- // We use INT32_MIN as a lower bound because (INT32_MIN - 1) is not
- // representable as float, and no number between (INT32_MIN - 1) and INT32_MIN
- // is.
- float lower_bound = static_cast<float>(INT32_MIN);
- if (a < upper_bound && a >= lower_bound) {
- return static_cast<int32_t>(a);
- }
- *trap = kTrapFloatUnrepresentable;
- return 0;
-}
-
-int32_t ExecuteI32SConvertF64(double a, TrapReason* trap) {
- // The upper bound is (INT32_MAX + 1), which is the lowest double-
- // representable number above INT32_MAX which cannot be represented as int32.
- double upper_bound = 2147483648.0;
- // The lower bound is (INT32_MIN - 1), which is the greatest double-
- // representable number below INT32_MIN which cannot be represented as int32.
- double lower_bound = -2147483649.0;
- if (a < upper_bound && a > lower_bound) {
- return static_cast<int32_t>(a);
- }
- *trap = kTrapFloatUnrepresentable;
- return 0;
-}
-
-uint32_t ExecuteI32UConvertF32(float a, TrapReason* trap) {
- // The upper bound is (UINT32_MAX + 1), which is the lowest
- // float-representable number above UINT32_MAX which cannot be represented as
- // uint32.
- double upper_bound = 4294967296.0f;
- double lower_bound = -1.0f;
- if (a < upper_bound && a > lower_bound) {
- return static_cast<uint32_t>(a);
+template <typename int_type, typename float_type>
+int_type ExecuteConvert(float_type a, TrapReason* trap) {
+ if (is_inbounds<int_type>(a)) {
+ return static_cast<int_type>(a);
}
*trap = kTrapFloatUnrepresentable;
return 0;
}
-uint32_t ExecuteI32UConvertF64(double a, TrapReason* trap) {
- // The upper bound is (UINT32_MAX + 1), which is the lowest
- // double-representable number above UINT32_MAX which cannot be represented as
- // uint32.
- double upper_bound = 4294967296.0;
- double lower_bound = -1.0;
- if (a < upper_bound && a > lower_bound) {
- return static_cast<uint32_t>(a);
+template <typename int_type, typename float_type>
+int_type ExecuteConvertSaturate(float_type a) {
+ TrapReason base_trap = kTrapCount;
+ int32_t val = ExecuteConvert<int_type>(a, &base_trap);
+ if (base_trap == kTrapCount) {
+ return val;
}
- *trap = kTrapFloatUnrepresentable;
- return 0;
+ return std::isnan(a) ? 0
+ : (a < static_cast<float_type>(0.0)
+ ? std::numeric_limits<int_type>::min()
+ : std::numeric_limits<int_type>::max());
}
inline uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) {
@@ -643,7 +609,7 @@ Handle<HeapObject> UnwrapWasmToJSWrapper(Isolate* isolate,
DCHECK_GT(js_imports_table->length(), index);
} else {
const wasm::WasmCode* wasm_code = wrapper.GetWasmCode();
- DCHECK_EQ(wasm::WasmCode::WasmToJsWrapper, wasm_code->kind());
+ DCHECK_EQ(wasm::WasmCode::kWasmToJsWrapper, wasm_code->kind());
js_imports_table = Handle<FixedArray>(wasm_code->owner()
->compiled_module()
->owning_instance()
@@ -1005,7 +971,7 @@ class CodeMap {
Code* GetImportedFunctionGC(uint32_t function_index) {
DCHECK(has_instance());
DCHECK_GT(module_->num_imported_functions, function_index);
- FixedArray* code_table = instance()->compiled_module()->ptr_to_code_table();
+ FixedArray* code_table = instance()->compiled_module()->code_table();
return Code::cast(code_table->get(static_cast<int>(function_index)));
}
@@ -1221,7 +1187,7 @@ class ThreadImpl {
}
WasmValue GetReturnValue(uint32_t index) {
- if (state_ == WasmInterpreter::TRAPPED) return WasmValue(0xdeadbeef);
+ if (state_ == WasmInterpreter::TRAPPED) return WasmValue(0xDEADBEEF);
DCHECK_EQ(WasmInterpreter::FINISHED, state_);
Activation act = current_activation();
// Current activation must be finished.
@@ -1510,10 +1476,10 @@ class ThreadImpl {
len = 1 + operand.length;
if (FLAG_wasm_trace_memory) {
- tracing::TraceMemoryOperation(
- tracing::kWasmInterpreted, false, rep, operand.offset + index,
- code->function->func_index, static_cast<int>(pc),
- wasm_context_->mem_start);
+ wasm::MemoryTracingInfo info(operand.offset + index, false, rep);
+ TraceMemoryOperation(ExecutionEngine::kInterpreter, &info,
+ code->function->func_index, static_cast<int>(pc),
+ wasm_context_->mem_start);
}
return true;
@@ -1536,22 +1502,22 @@ class ThreadImpl {
len = 1 + operand.length;
if (FLAG_wasm_trace_memory) {
- tracing::TraceMemoryOperation(
- tracing::kWasmInterpreted, true, rep, operand.offset + index,
- code->function->func_index, static_cast<int>(pc),
- wasm_context_->mem_start);
+ wasm::MemoryTracingInfo info(operand.offset + index, true, rep);
+ TraceMemoryOperation(ExecutionEngine::kInterpreter, &info,
+ code->function->func_index, static_cast<int>(pc),
+ wasm_context_->mem_start);
}
return true;
}
template <typename type>
- bool ExtractAtomicBinOpParams(Decoder* decoder, InterpreterCode* code,
- Address& address, pc_t pc, type& val,
- int& len) {
+ bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
+ Address& address, pc_t pc, int& len,
+ type* val = nullptr) {
MemoryAccessOperand<Decoder::kNoValidate> operand(decoder, code->at(pc + 1),
sizeof(type));
- val = Pop().to<uint32_t>();
+ if (val) *val = Pop().to<uint32_t>();
uint32_t index = Pop().to<uint32_t>();
address = BoundsCheckMem<type>(operand.offset, index);
if (!address) {
@@ -1562,42 +1528,48 @@ class ThreadImpl {
return true;
}
+ bool ExecuteNumericOp(WasmOpcode opcode, Decoder* decoder,
+ InterpreterCode* code, pc_t pc, int& len) {
+ switch (opcode) {
+ case kExprI32SConvertSatF32:
+ Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<float>())));
+ return true;
+ case kExprI32UConvertSatF32:
+ Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<float>())));
+ return true;
+ case kExprI32SConvertSatF64:
+ Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<double>())));
+ return true;
+ case kExprI32UConvertSatF64:
+ Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<double>())));
+ return true;
+ default:
+ V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
+ code->start[pc], OpcodeName(code->start[pc]));
+ UNREACHABLE();
+ }
+ return false;
+ }
+
bool ExecuteAtomicOp(WasmOpcode opcode, Decoder* decoder,
InterpreterCode* code, pc_t pc, int& len) {
WasmValue result;
switch (opcode) {
-// TODO(gdeepti): Remove work-around when the bots are upgraded to a more
-// recent gcc version. The gcc bots (Android ARM, linux) currently use
-// gcc 4.8, in which atomics are insufficiently supported, also Bug#58016
-// (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58016)
-#if __GNUG__ && __GNUC__ < 5
-#define ATOMIC_BINOP_CASE(name, type, operation) \
- case kExpr##name: { \
- type val; \
- Address addr; \
- if (!ExtractAtomicBinOpParams<type>(decoder, code, addr, pc, val, len)) { \
- return false; \
- } \
- result = WasmValue( \
- __##operation(reinterpret_cast<type*>(addr), val, __ATOMIC_SEQ_CST)); \
- break; \
- }
-#else
-#define ATOMIC_BINOP_CASE(name, type, operation) \
- case kExpr##name: { \
- type val; \
- Address addr; \
- if (!ExtractAtomicBinOpParams<type>(decoder, code, addr, pc, val, len)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<std::type>) == sizeof(type), \
- "Size mismatch for types std::atomic<std::" #type \
- ">, and " #type); \
- result = WasmValue( \
- std::operation(reinterpret_cast<std::atomic<std::type>*>(addr), val)); \
- break; \
+#define ATOMIC_BINOP_CASE(name, type, operation) \
+ case kExpr##name: { \
+ type val; \
+ Address addr; \
+ if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len, &val)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ result = WasmValue( \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr), val)); \
+ Push(result); \
+ break; \
}
-#endif
ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, atomic_fetch_add);
ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, atomic_fetch_add);
ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, atomic_fetch_add);
@@ -1613,20 +1585,48 @@ class ThreadImpl {
ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, atomic_fetch_xor);
ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, atomic_fetch_xor);
ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, atomic_fetch_xor);
-#if __GNUG__ && __GNUC__ < 5
- ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, atomic_exchange_n);
- ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, atomic_exchange_n);
- ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, atomic_exchange_n);
-#else
ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, atomic_exchange);
ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, atomic_exchange);
ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, atomic_exchange);
-#endif
#undef ATOMIC_BINOP_CASE
+#define ATOMIC_LOAD_CASE(name, type, operation) \
+ case kExpr##name: { \
+ Address addr; \
+ if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ result = \
+ WasmValue(std::operation(reinterpret_cast<std::atomic<type>*>(addr))); \
+ Push(result); \
+ break; \
+ }
+ ATOMIC_LOAD_CASE(I32AtomicLoad, uint32_t, atomic_load);
+ ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t, atomic_load);
+ ATOMIC_LOAD_CASE(I32AtomicLoad16U, uint16_t, atomic_load);
+#undef ATOMIC_LOAD_CASE
+#define ATOMIC_STORE_CASE(name, type, operation) \
+ case kExpr##name: { \
+ type val; \
+ Address addr; \
+ if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len, &val)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr), val); \
+ break; \
+ }
+ ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, atomic_store);
+ ATOMIC_STORE_CASE(I32AtomicStore8U, uint8_t, atomic_store);
+ ATOMIC_STORE_CASE(I32AtomicStore16U, uint16_t, atomic_store);
+#undef ATOMIC_STORE_CASE
default:
return false;
}
- Push(result);
return true;
}
@@ -2076,8 +2076,8 @@ class ThreadImpl {
case kExprMemorySize: {
MemoryIndexOperand<Decoder::kNoValidate> operand(&decoder,
code->at(pc));
- Push(WasmValue(static_cast<uint32_t>(wasm_context_->mem_size /
- WasmModule::kPageSize)));
+ Push(WasmValue(
+ static_cast<uint32_t>(wasm_context_->mem_size / kWasmPageSize)));
len = 1 + operand.length;
break;
}
@@ -2094,6 +2094,11 @@ class ThreadImpl {
Push(WasmValue(ExecuteI64ReinterpretF64(val)));
break;
}
+ case kNumericPrefix: {
+ ++len;
+ if (!ExecuteNumericOp(opcode, &decoder, code, pc, len)) return;
+ break;
+ }
case kAtomicPrefix: {
if (!ExecuteAtomicOp(opcode, &decoder, code, pc, len)) return;
break;
@@ -2125,19 +2130,27 @@ class ThreadImpl {
FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
#undef EXECUTE_OTHER_BINOP
-#define EXECUTE_OTHER_UNOP(name, ctype) \
+#define EXECUTE_UNOP(name, ctype, exec_fn) \
case kExpr##name: { \
TrapReason trap = kTrapCount; \
ctype val = Pop().to<ctype>(); \
- auto result = Execute##name(val, &trap); \
+ auto result = exec_fn(val, &trap); \
possible_nondeterminism_ |= has_nondeterminism(result); \
if (trap != kTrapCount) return DoTrap(trap, pc); \
Push(WasmValue(result)); \
break; \
}
+
+#define EXECUTE_OTHER_UNOP(name, ctype) EXECUTE_UNOP(name, ctype, Execute##name)
FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
#undef EXECUTE_OTHER_UNOP
+#define EXECUTE_I32CONV_FLOATOP(name, out_type, in_type) \
+ EXECUTE_UNOP(name, in_type, ExecuteConvert<out_type>)
+ FOREACH_I32CONV_FLOATOP(EXECUTE_I32CONV_FLOATOP)
+#undef EXECUTE_I32CONV_FLOATOP
+#undef EXECUTE_UNOP
+
default:
V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
code->start[pc], OpcodeName(code->start[pc]));
@@ -2452,14 +2465,14 @@ class ThreadImpl {
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
- if (code->kind() == wasm::WasmCode::Function) {
- DCHECK_EQ(*code->owner()->compiled_module()->owning_instance(),
+ if (code->kind() == wasm::WasmCode::kFunction) {
+ DCHECK_EQ(code->owner()->compiled_module()->owning_instance(),
codemap()->instance());
return {ExternalCallResult::INTERNAL, codemap()->GetCode(code->index())};
}
- if (code->kind() == wasm::WasmCode::WasmToJsWrapper) {
+ if (code->kind() == wasm::WasmCode::kWasmToJsWrapper) {
return CallExternalJSFunction(isolate, WasmCodeWrapper(code), signature);
- } else if (code->kind() == wasm::WasmCode::WasmToWasmWrapper) {
+ } else if (code->kind() == wasm::WasmCode::kWasmToWasmWrapper) {
return CallExternalWasmFunction(isolate, WasmCodeWrapper(code),
signature);
}
@@ -2540,66 +2553,61 @@ class ThreadImpl {
if (!FLAG_wasm_jit_to_native) {
// Check signature.
- FixedArray* sig_tables = compiled_module->ptr_to_signature_tables();
- if (table_index >= static_cast<uint32_t>(sig_tables->length())) {
+ FixedArray* fun_tables = compiled_module->function_tables();
+ if (table_index >= static_cast<uint32_t>(fun_tables->length())) {
return {ExternalCallResult::INVALID_FUNC};
}
- // Reconstitute the global handle to sig_table, and, further below,
- // to the function table, from the address stored in the
- // respective table of tables.
+ // Reconstitute the global handle to the function table, from the
+ // address stored in the respective table of tables.
int table_index_as_int = static_cast<int>(table_index);
- Handle<FixedArray> sig_table(reinterpret_cast<FixedArray**>(
- WasmCompiledModule::GetTableValue(sig_tables, table_index_as_int)));
- if (entry_index >= static_cast<uint32_t>(sig_table->length())) {
+ FixedArray* fun_table = *reinterpret_cast<FixedArray**>(
+ WasmCompiledModule::GetTableValue(fun_tables, table_index_as_int));
+ // Function tables store <smi, code> pairs.
+ int num_funcs_in_table =
+ fun_table->length() / compiler::kFunctionTableEntrySize;
+ if (entry_index >= static_cast<uint32_t>(num_funcs_in_table)) {
return {ExternalCallResult::INVALID_FUNC};
}
- int found_sig =
- Smi::ToInt(sig_table->get(static_cast<int>(entry_index)));
+ int found_sig = Smi::ToInt(fun_table->get(
+ compiler::FunctionTableSigOffset(static_cast<int>(entry_index))));
if (static_cast<uint32_t>(found_sig) != canonical_sig_index) {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
// Get code object.
- FixedArray* fun_tables = compiled_module->ptr_to_function_tables();
- DCHECK_EQ(sig_tables->length(), fun_tables->length());
- Handle<FixedArray> fun_table(reinterpret_cast<FixedArray**>(
- WasmCompiledModule::GetTableValue(fun_tables, table_index_as_int)));
- DCHECK_EQ(sig_table->length(), fun_table->length());
- target_gc = Code::cast(fun_table->get(static_cast<int>(entry_index)));
+ target_gc = Code::cast(fun_table->get(
+ compiler::FunctionTableCodeOffset(static_cast<int>(entry_index))));
} else {
// Check signature.
- std::vector<GlobalHandleAddress>& sig_tables =
- compiled_module->GetNativeModule()->signature_tables();
- if (table_index >= sig_tables.size()) {
+ std::vector<GlobalHandleAddress>& fun_tables =
+ compiled_module->GetNativeModule()->function_tables();
+ if (table_index >= fun_tables.size()) {
return {ExternalCallResult::INVALID_FUNC};
}
- // Reconstitute the global handle to sig_table, and, further below,
- // to the function table, from the address stored in the
- // respective table of tables.
- int table_index_as_int = static_cast<int>(table_index);
- Handle<FixedArray> sig_table(
- reinterpret_cast<FixedArray**>(sig_tables[table_index_as_int]));
- if (entry_index >= static_cast<uint32_t>(sig_table->length())) {
+ // Reconstitute the global handle to the function table, from the
+ // address stored in the respective table of tables.
+ FixedArray* fun_table =
+ *reinterpret_cast<FixedArray**>(fun_tables[table_index]);
+ // Function tables store <smi, code> pairs.
+ int num_funcs_in_table =
+ fun_table->length() / compiler::kFunctionTableEntrySize;
+ if (entry_index >= static_cast<uint32_t>(num_funcs_in_table)) {
return {ExternalCallResult::INVALID_FUNC};
}
- int found_sig =
- Smi::ToInt(sig_table->get(static_cast<int>(entry_index)));
+ int found_sig = Smi::ToInt(fun_table->get(
+ compiler::FunctionTableSigOffset(static_cast<int>(entry_index))));
if (static_cast<uint32_t>(found_sig) != canonical_sig_index) {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
// Get code object.
- std::vector<GlobalHandleAddress>& fun_tables =
- compiled_module->GetNativeModule()->function_tables();
- DCHECK_EQ(sig_tables.size(), fun_tables.size());
- Handle<FixedArray> fun_table(
- reinterpret_cast<FixedArray**>(fun_tables[table_index_as_int]));
- DCHECK_EQ(sig_table->length(), fun_table->length());
Address first_instr =
- Foreign::cast(fun_table->get(static_cast<int>(entry_index)))
+ Foreign::cast(fun_table->get(compiler::FunctionTableCodeOffset(
+ static_cast<int>(entry_index))))
->foreign_address();
target =
- isolate->wasm_code_manager()->GetCodeFromStartAddress(first_instr);
+ isolate->wasm_engine()->code_manager()->GetCodeFromStartAddress(
+ first_instr);
}
}
@@ -2761,11 +2769,10 @@ pc_t WasmInterpreter::Thread::GetBreakpointPc() {
int WasmInterpreter::Thread::GetFrameCount() {
return ToImpl(this)->GetFrameCount();
}
-std::unique_ptr<InterpretedFrame> WasmInterpreter::Thread::GetFrame(int index) {
+WasmInterpreter::FramePtr WasmInterpreter::Thread::GetFrame(int index) {
DCHECK_LE(0, index);
DCHECK_GT(GetFrameCount(), index);
- return std::unique_ptr<InterpretedFrame>(
- ToFrame(new InterpretedFrameImpl(ToImpl(this), index)));
+ return FramePtr(ToFrame(new InterpretedFrameImpl(ToImpl(this), index)));
}
WasmValue WasmInterpreter::Thread::GetReturnValue(int index) {
return ToImpl(this)->GetReturnValue(index);
@@ -2926,6 +2933,9 @@ WasmValue InterpretedFrame::GetLocalValue(int index) const {
WasmValue InterpretedFrame::GetStackValue(int index) const {
return ToImpl(this)->GetStackValue(index);
}
+void InterpretedFrame::Deleter::operator()(InterpretedFrame* ptr) {
+ delete ToImpl(ptr);
+}
//============================================================================
// Public API of the heap objects scope.
@@ -2945,6 +2955,7 @@ WasmInterpreter::HeapObjectsScope::~HeapObjectsScope() {
#undef WASM_CTYPES
#undef FOREACH_SIMPLE_BINOP
#undef FOREACH_OTHER_BINOP
+#undef FOREACH_I32CONV_FLOATOP
#undef FOREACH_OTHER_UNOP
} // namespace wasm
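
The ExecuteConvertSaturate template added above backs the new saturating i32 conversion opcodes: out-of-range inputs clamp to the integer type's minimum or maximum and NaN maps to 0, instead of trapping. A standalone sketch of the same behavior (is_inbounds is a V8 helper; the simplified bounds check below is an illustrative stand-in):

    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Convert a float/double to int_type, saturating instead of trapping.
    template <typename int_type, typename float_type>
    int_type ConvertSaturate(float_type a) {
      if (std::isnan(a)) return 0;
      if (a <= static_cast<float_type>(std::numeric_limits<int_type>::min()))
        return std::numeric_limits<int_type>::min();
      if (a >= static_cast<float_type>(std::numeric_limits<int_type>::max()))
        return std::numeric_limits<int_type>::max();
      return static_cast<int_type>(a);  // in range: plain truncation
    }

    // Example behavior: ConvertSaturate<int32_t>(3.5e10) == INT32_MAX,
    // ConvertSaturate<uint32_t>(-1.5f) == 0, and NaN inputs yield 0.
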
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index cdfa74cfad..b0c100b5a9 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -71,6 +71,12 @@ class InterpretedFrame {
WasmValue GetLocalValue(int index) const;
WasmValue GetStackValue(int index) const;
+ // Deleter struct to delete the underlying InterpretedFrameImpl as its real
+ // type; deleting through the opaque InterpretedFrame pointer would be
+ // undefined behavior.
+ struct Deleter {
+ void operator()(InterpretedFrame* ptr);
+ };
+
private:
friend class WasmInterpreter;
  // Don't instantiate InterpretedFrames; they will be allocated as
@@ -113,6 +119,8 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
AfterCall = 1 << 1
};
+ using FramePtr = std::unique_ptr<InterpretedFrame, InterpretedFrame::Deleter>;
+
// Representation of a thread in the interpreter.
class V8_EXPORT_PRIVATE Thread {
  // Don't instantiate Threads; they will be allocated as ThreadImpl in the
@@ -139,7 +147,7 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
// TODO(clemensh): Make this uint32_t.
int GetFrameCount();
// The InterpretedFrame is only valid as long as the Thread is paused.
- std::unique_ptr<InterpretedFrame> GetFrame(int index);
+ FramePtr GetFrame(int index);
WasmValue GetReturnValue(int index = 0);
TrapReason GetTrapReason();
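
FramePtr above pairs std::unique_ptr with InterpretedFrame::Deleter so that destruction goes through the implementation type rather than the opaque facade. The same pattern in a self-contained sketch (Widget and WidgetImpl are hypothetical names, not V8 types):

    #include <memory>

    class WidgetImpl {                 // the real object that gets allocated
     public:
      int value = 0;
    };

    class Widget {                     // opaque facade handed out to callers
     public:
      struct Deleter {
        void operator()(Widget* ptr) {
          delete reinterpret_cast<WidgetImpl*>(ptr);  // delete as the real type
        }
      };
      Widget() = delete;               // never constructed directly
    };

    using WidgetPtr = std::unique_ptr<Widget, Widget::Deleter>;

    WidgetPtr MakeWidget() {
      return WidgetPtr(reinterpret_cast<Widget*>(new WidgetImpl()));
    }
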
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 03cc26e017..ce2bf42455 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -17,13 +17,11 @@
#include "src/parsing/parse-info.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/module-compiler.h"
-#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-api.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
-#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
-#include "src/wasm/wasm-result.h"
using v8::internal::wasm::ErrorThrower;
@@ -63,7 +61,8 @@ i::MaybeHandle<i::WasmModuleObject> GetFirstArgumentAsModule(
}
i::wasm::ModuleWireBytes GetFirstArgumentAsBytes(
- const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower) {
+ const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower,
+ bool* is_shared) {
const uint8_t* start = nullptr;
size_t length = 0;
v8::Local<v8::Value> source = args[0];
@@ -74,6 +73,7 @@ i::wasm::ModuleWireBytes GetFirstArgumentAsBytes(
start = reinterpret_cast<const uint8_t*>(contents.Data());
length = contents.ByteLength();
+ *is_shared = buffer->IsSharedArrayBuffer();
} else if (source->IsTypedArray()) {
// A TypedArray was passed.
Local<TypedArray> array = Local<TypedArray>::Cast(source);
@@ -84,6 +84,7 @@ i::wasm::ModuleWireBytes GetFirstArgumentAsBytes(
start =
reinterpret_cast<const uint8_t*>(contents.Data()) + array->ByteOffset();
length = array->ByteLength();
+ *is_shared = buffer->IsSharedArrayBuffer();
} else {
thrower->TypeError("Argument 0 must be a buffer source");
}
@@ -154,7 +155,8 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(resolver->GetPromise());
- auto bytes = GetFirstArgumentAsBytes(args, &thrower);
+ bool is_shared = false;
+ auto bytes = GetFirstArgumentAsBytes(args, &thrower, &is_shared);
if (thrower.error()) {
auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
CHECK_IMPLIES(!maybe.FromMaybe(false),
@@ -162,7 +164,8 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
i::Handle<i::JSPromise> promise = Utils::OpenHandle(*resolver->GetPromise());
- i::wasm::AsyncCompile(i_isolate, promise, bytes);
+ // Asynchronous compilation handles copying wire bytes if necessary.
+ i::wasm::AsyncCompile(i_isolate, promise, bytes, is_shared);
}
// WebAssembly.validate(bytes) -> bool
@@ -172,16 +175,31 @@ void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(isolate);
i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.validate()");
- auto bytes = GetFirstArgumentAsBytes(args, &thrower);
+ bool is_shared = false;
+ auto bytes = GetFirstArgumentAsBytes(args, &thrower, &is_shared);
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- if (!thrower.error() &&
- i::wasm::SyncValidate(reinterpret_cast<i::Isolate*>(isolate), bytes)) {
- return_value.Set(v8::True(isolate));
- } else {
+
+ if (thrower.error()) {
if (thrower.wasm_error()) thrower.Reset(); // Clear error.
return_value.Set(v8::False(isolate));
+ return;
}
+
+ bool validated = false;
+ if (is_shared) {
+ // Make a copy of the wire bytes to avoid concurrent modification.
+ std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
+ memcpy(copy.get(), bytes.start(), bytes.length());
+ i::wasm::ModuleWireBytes bytes_copy(copy.get(),
+ copy.get() + bytes.length());
+ validated = i_isolate->wasm_engine()->SyncValidate(i_isolate, bytes_copy);
+ } else {
+ // The wire bytes are not shared, OK to use them directly.
+ validated = i_isolate->wasm_engine()->SyncValidate(i_isolate, bytes);
+ }
+
+ return_value.Set(Boolean::New(isolate, validated));
}
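
Editor's note: the validate path above, and the synchronous compile path below, share one pattern: when the wire bytes come from a SharedArrayBuffer they are snapshotted into a private copy before use, because another thread could mutate the shared backing store mid-validation. A minimal, self-contained sketch of that pattern; the `validate` callback is a stand-in, not a V8 API:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <memory>

// `validate` stands in for the engine's validator; its shape here is illustrative.
bool ValidatePossiblyShared(
    const uint8_t* start, size_t length, bool is_shared,
    const std::function<bool(const uint8_t*, size_t)>& validate) {
  if (!is_shared) {
    // Not shared: safe to read the caller's bytes in place.
    return validate(start, length);
  }
  // Shared: snapshot the bytes so a concurrent writer to the
  // SharedArrayBuffer cannot change them while validation runs.
  std::unique_ptr<uint8_t[]> copy(new uint8_t[length]);
  std::memcpy(copy.get(), start, length);
  return validate(copy.get(), length);
}
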
// new WebAssembly.Module(bytes) -> WebAssembly.Module
@@ -202,13 +220,25 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- auto bytes = GetFirstArgumentAsBytes(args, &thrower);
+ bool is_shared = false;
+ auto bytes = GetFirstArgumentAsBytes(args, &thrower, &is_shared);
if (thrower.error()) {
return;
}
- i::MaybeHandle<i::Object> module_obj =
- i::wasm::SyncCompile(i_isolate, &thrower, bytes);
+ i::MaybeHandle<i::Object> module_obj;
+ if (is_shared) {
+ // Make a copy of the wire bytes to avoid concurrent modification.
+ std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
+ memcpy(copy.get(), bytes.start(), bytes.length());
+ i::wasm::ModuleWireBytes bytes_copy(copy.get(),
+ copy.get() + bytes.length());
+ module_obj = i::wasm::SyncCompile(i_isolate, &thrower, bytes_copy);
+ } else {
+ // The wire bytes are not shared, OK to use them directly.
+ module_obj = i::wasm::SyncCompile(i_isolate, &thrower, bytes);
+ }
+
if (module_obj.is_null()) return;
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
@@ -598,10 +628,12 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
}
- size_t size = static_cast<size_t>(i::wasm::WasmModule::kPageSize) *
+ size_t size = static_cast<size_t>(i::wasm::kWasmPageSize) *
static_cast<size_t>(initial);
+ const bool enable_guard_regions =
+ internal::trap_handler::IsTrapHandlerEnabled();
i::Handle<i::JSArrayBuffer> buffer = i::wasm::NewArrayBuffer(
- i_isolate, size, internal::trap_handler::UseTrapHandler(),
+ i_isolate, size, enable_guard_regions,
is_shared_memory ? i::SharedFlag::kShared : i::SharedFlag::kNotShared);
if (buffer.is_null()) {
thrower.RangeError("could not allocate memory");
@@ -751,7 +783,20 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- i::WasmTableObject::Set(i_isolate, receiver, static_cast<int32_t>(index),
+ // TODO(v8:7232) Allow reset/mutation after addressing referenced issue.
+ int32_t int_index = static_cast<int32_t>(index);
+ if (receiver->functions()->get(int_index) !=
+ i_isolate->heap()->undefined_value() &&
+ receiver->functions()->get(int_index) !=
+ i_isolate->heap()->null_value()) {
+ for (i::StackFrameIterator it(i_isolate); !it.done(); it.Advance()) {
+ if (it.frame()->type() == i::StackFrame::WASM_TO_JS) {
+ thrower.RangeError("Modifying existing entry in table not supported.");
+ return;
+ }
+ }
+ }
+ i::WasmTableObject::Set(i_isolate, receiver, static_cast<int32_t>(int_index),
value->IsNull(i_isolate)
? i::Handle<i::JSFunction>::null()
: i::Handle<i::JSFunction>::cast(value));
@@ -794,18 +839,6 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
thrower.RangeError("Unable to grow instance memory.");
return;
}
- if (!old_buffer->is_shared()) {
- // When delta_size == 0, or guard pages are enabled, the same backing store
- // is used. To be spec compliant, the buffer associated with the memory
- // object needs to be detached. Setup a new buffer with the same backing
- // store, detach the old buffer, and do not free backing store memory.
- bool free_memory = delta_size != 0 && !old_buffer->has_guard_region();
- if ((!free_memory && old_size != 0) || new_size64 == 0) {
- i::WasmMemoryObject::SetupNewBufferWithSameBackingStore(
- i_isolate, receiver, static_cast<uint32_t>(new_size64));
- }
- i::wasm::DetachMemoryBuffer(i_isolate, old_buffer, free_memory);
- }
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(ret);
}
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index f298fd3fe1..184b6329ba 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -9,6 +9,8 @@
#include <cstdint>
#include <limits>
+#include "src/wasm/wasm-constants.h"
+
namespace v8 {
namespace internal {
namespace wasm {
@@ -41,10 +43,11 @@ constexpr size_t kV8MaxWasmTables = 1;
constexpr size_t kV8MaxWasmMemories = 1;
constexpr size_t kSpecMaxWasmMemoryPages = 65536;
+static_assert(kV8MaxWasmMemoryPages <= kSpecMaxWasmMemoryPages,
+ "v8 should not be more permissive than the spec");
constexpr size_t kSpecMaxWasmTableSize = 0xFFFFFFFFu;
-// TODO(titzer): move WASM page size constant here.
-constexpr size_t kV8MaxWasmMemoryBytes = kV8MaxWasmMemoryPages * 65536;
+constexpr size_t kV8MaxWasmMemoryBytes = kV8MaxWasmMemoryPages * kWasmPageSize;
constexpr uint64_t kWasmMaxHeapOffset =
static_cast<uint64_t>(
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index 9f037c898d..fcbe60ae0e 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -4,6 +4,7 @@
#include "src/wasm/wasm-memory.h"
#include "src/objects-inl.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
@@ -11,30 +12,70 @@ namespace v8 {
namespace internal {
namespace wasm {
+WasmAllocationTracker::~WasmAllocationTracker() {
+ // All reserved address space should be released before the allocation tracker
+ // is destroyed.
+ DCHECK_EQ(allocated_address_space_, 0u);
+}
+
+bool WasmAllocationTracker::ReserveAddressSpace(size_t num_bytes) {
+// Address space reservations are currently only meaningful using guard
+// regions, which is currently only supported on 64-bit systems. On other
+// platforms, we always fall back on bounds checks.
+#if V8_TARGET_ARCH_64_BIT
+ static constexpr size_t kAddressSpaceLimit = 0x10000000000L; // 1 TiB
+
+ size_t const old_count = allocated_address_space_.fetch_add(num_bytes);
+ DCHECK_GE(old_count + num_bytes, old_count);
+ if (old_count + num_bytes <= kAddressSpaceLimit) {
+ return true;
+ }
+ allocated_address_space_ -= num_bytes;
+#endif
+ return false;
+}
+
+void WasmAllocationTracker::ReleaseAddressSpace(size_t num_bytes) {
+ DCHECK_LE(num_bytes, allocated_address_space_);
+ allocated_address_space_ -= num_bytes;
+}
+
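
Editor's note: ReserveAddressSpace/ReleaseAddressSpace above implement an optimistic reservation counter: bump first, then roll back if the 1 TiB budget would be exceeded. A stripped-down sketch of the same pattern using only std::atomic; the class name and limit are placeholders, not V8 internals:

#include <atomic>
#include <cstddef>

class AddressSpaceBudget {
 public:
  explicit AddressSpaceBudget(size_t limit) : limit_(limit) {}

  // Optimistically add, then roll back if the budget is blown.
  bool Reserve(size_t bytes) {
    size_t old_count = reserved_.fetch_add(bytes);
    if (old_count + bytes <= limit_) return true;
    reserved_.fetch_sub(bytes);
    return false;
  }

  // Must be paired with every successful Reserve().
  void Release(size_t bytes) { reserved_.fetch_sub(bytes); }

 private:
  const size_t limit_;
  std::atomic<size_t> reserved_{0};
};
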
void* TryAllocateBackingStore(Isolate* isolate, size_t size,
- bool enable_guard_regions, void*& allocation_base,
- size_t& allocation_length) {
- // TODO(eholk): Right now enable_guard_regions has no effect on 32-bit
+ bool require_guard_regions,
+ void** allocation_base,
+ size_t* allocation_length) {
+ // TODO(eholk): Right now require_guard_regions has no effect on 32-bit
// systems. It may be safer to fail instead, given that other code might do
// things that would be unsafe if they expected guard pages where there
// weren't any.
- if (enable_guard_regions) {
+ if (require_guard_regions) {
// TODO(eholk): On Windows we want to make sure we don't commit the guard
// pages yet.
// We always allocate the largest possible offset into the heap, so the
// addressable memory after the guard page can be made inaccessible.
- allocation_length = RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize());
- DCHECK_EQ(0, size % base::OS::CommitPageSize());
+ *allocation_length = RoundUp(kWasmMaxHeapOffset, CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+
+ WasmAllocationTracker* const allocation_tracker =
+ isolate->wasm_engine()->allocation_tracker();
+
+ // Let the WasmAllocationTracker know we are going to reserve a bunch of
+ // address space.
+ if (!allocation_tracker->ReserveAddressSpace(*allocation_length)) {
+ // If we are over the address space limit, fail.
+ return nullptr;
+ }
// The Reserve makes the whole region inaccessible by default.
- allocation_base =
- isolate->array_buffer_allocator()->Reserve(allocation_length);
- if (allocation_base == nullptr) {
+ *allocation_base =
+ isolate->array_buffer_allocator()->Reserve(*allocation_length);
+ if (*allocation_base == nullptr) {
+ allocation_tracker->ReleaseAddressSpace(*allocation_length);
return nullptr;
}
- void* memory = allocation_base;
+ void* memory = *allocation_base;
// Make the part we care about accessible.
isolate->array_buffer_allocator()->SetProtection(
@@ -47,13 +88,13 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
} else {
// TODO(titzer): use guard regions for minicage and merge with above code.
CHECK_LE(size, kV8MaxWasmMemoryBytes);
- allocation_length =
+ *allocation_length =
base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size));
void* memory =
size == 0
? nullptr
- : isolate->array_buffer_allocator()->Allocate(allocation_length);
- allocation_base = memory;
+ : isolate->array_buffer_allocator()->Allocate(*allocation_length);
+ *allocation_base = memory;
return memory;
}
}
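
Editor's note: in the guard-region branch above, the allocator reserves the whole addressable range up front and then makes only the usable prefix accessible. A POSIX-only sketch of the underlying OS mechanism (plain mmap/mprotect, not V8's ArrayBuffer::Allocator interface):

#include <sys/mman.h>
#include <cstddef>

// Reserve a large inaccessible range, then commit only the first
// `accessible_bytes` for read/write; the remainder acts as a guard region.
void* ReserveWithGuardRegion(size_t reserve_bytes, size_t accessible_bytes) {
  void* base = mmap(nullptr, reserve_bytes, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return nullptr;
  if (mprotect(base, accessible_bytes, PROT_READ | PROT_WRITE) != 0) {
    munmap(base, reserve_bytes);
    return nullptr;
  }
  return base;  // Accesses past accessible_bytes fault instead of corrupting memory.
}
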
@@ -78,13 +119,12 @@ Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* allocation_base,
}
Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
- bool enable_guard_regions,
+ bool require_guard_regions,
SharedFlag shared) {
// Check against kMaxInt, since the byte length is stored as int in the
// JSArrayBuffer. Note that wasm_max_mem_pages can be raised from the command
// line, and we don't want to fail a CHECK then.
- if (size > FLAG_wasm_max_mem_pages * WasmModule::kPageSize ||
- size > kMaxInt) {
+ if (size > FLAG_wasm_max_mem_pages * kWasmPageSize || size > kMaxInt) {
// TODO(titzer): lift restriction on maximum memory allocated here.
return Handle<JSArrayBuffer>::null();
}
@@ -92,10 +132,10 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
void* allocation_base = nullptr; // Set by TryAllocateBackingStore
size_t allocation_length = 0; // Set by TryAllocateBackingStore
  // Do not reserve memory until non-zero memory is encountered.
- void* memory =
- (size == 0) ? nullptr
- : TryAllocateBackingStore(isolate, size, enable_guard_regions,
- allocation_base, allocation_length);
+ void* memory = (size == 0) ? nullptr
+ : TryAllocateBackingStore(
+ isolate, size, require_guard_regions,
+ &allocation_base, &allocation_length);
if (size > 0 && memory == nullptr) {
return Handle<JSArrayBuffer>::null();
@@ -111,11 +151,14 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
constexpr bool is_external = false;
return SetupArrayBuffer(isolate, allocation_base, allocation_length, memory,
- size, is_external, enable_guard_regions, shared);
+ size, is_external, require_guard_regions, shared);
}
void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
bool free_memory) {
+ if (buffer->is_shared()) return; // Detaching shared buffers is impossible.
+ DCHECK(!buffer->is_neuterable());
+
const bool is_external = buffer->is_external();
DCHECK(!buffer->is_neuterable());
if (!is_external) {
@@ -130,6 +173,8 @@ void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
buffer->FreeBackingStore();
}
}
+
+ DCHECK(buffer->is_external());
buffer->set_is_neuterable(true);
buffer->Neuter();
}
diff --git a/deps/v8/src/wasm/wasm-memory.h b/deps/v8/src/wasm/wasm-memory.h
index 2676f3ade7..c5d6ef5154 100644
--- a/deps/v8/src/wasm/wasm-memory.h
+++ b/deps/v8/src/wasm/wasm-memory.h
@@ -13,8 +13,28 @@ namespace v8 {
namespace internal {
namespace wasm {
+class WasmAllocationTracker {
+ public:
+ WasmAllocationTracker() {}
+ ~WasmAllocationTracker();
+
+ // ReserveAddressSpace attempts to increase the reserved address space counter
+ // to determine whether there is enough headroom to allocate another guarded
+ // Wasm memory. Returns true if successful (meaning it is okay to go ahead and
+ // allocate the buffer), false otherwise.
+ bool ReserveAddressSpace(size_t num_bytes);
+
+ // Reduces the address space counter so that the space can be reused.
+ void ReleaseAddressSpace(size_t num_bytes);
+
+ private:
+ std::atomic_size_t allocated_address_space_{0};
+
+ DISALLOW_COPY_AND_ASSIGN(WasmAllocationTracker);
+};
+
Handle<JSArrayBuffer> NewArrayBuffer(
- Isolate*, size_t size, bool enable_guard_regions,
+ Isolate*, size_t size, bool require_guard_regions,
SharedFlag shared = SharedFlag::kNotShared);
Handle<JSArrayBuffer> SetupArrayBuffer(
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index 407ef08700..90b1d702cf 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -11,7 +11,7 @@
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/leb-helper.h"
-#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
@@ -140,8 +140,8 @@ void WasmFunctionBuilder::EmitDirectCallIndex(uint32_t index) {
void WasmFunctionBuilder::SetName(Vector<const char> name) { name_ = name; }
-void WasmFunctionBuilder::AddAsmWasmOffset(int call_position,
- int to_number_position) {
+void WasmFunctionBuilder::AddAsmWasmOffset(size_t call_position,
+ size_t to_number_position) {
// We only want to emit one mapping per byte offset.
DCHECK(asm_offsets_.size() == 0 || body_.size() > last_asm_byte_offset_);
@@ -150,21 +150,25 @@ void WasmFunctionBuilder::AddAsmWasmOffset(int call_position,
asm_offsets_.write_u32v(byte_offset - last_asm_byte_offset_);
last_asm_byte_offset_ = byte_offset;
- DCHECK_GE(call_position, 0);
- asm_offsets_.write_i32v(call_position - last_asm_source_position_);
+ DCHECK_GE(std::numeric_limits<uint32_t>::max(), call_position);
+ uint32_t call_position_u32 = static_cast<uint32_t>(call_position);
+ asm_offsets_.write_i32v(call_position_u32 - last_asm_source_position_);
- DCHECK_GE(to_number_position, 0);
- asm_offsets_.write_i32v(to_number_position - call_position);
- last_asm_source_position_ = to_number_position;
+ DCHECK_GE(std::numeric_limits<uint32_t>::max(), to_number_position);
+ uint32_t to_number_position_u32 = static_cast<uint32_t>(to_number_position);
+ asm_offsets_.write_i32v(to_number_position_u32 - call_position_u32);
+ last_asm_source_position_ = to_number_position_u32;
}
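
Editor's note: AddAsmWasmOffset now accepts size_t positions but still emits signed varint ("i32v") deltas relative to the previously written source position. A small sketch of that delta encoding, independent of ZoneBuffer; the function names are illustrative:

#include <cstdint>
#include <vector>

// Append a signed LEB128 ("i32v") encoding of `value`.
void WriteI32V(std::vector<uint8_t>* out, int32_t value) {
  while (true) {
    uint8_t b = value & 0x7f;
    value >>= 7;  // arithmetic shift keeps the sign for negative deltas
    bool done = (value == 0 && !(b & 0x40)) || (value == -1 && (b & 0x40));
    if (!done) b |= 0x80;
    out->push_back(b);
    if (done) return;
  }
}

// Source positions are written as deltas from the previously emitted position.
void WritePositionDeltas(std::vector<uint8_t>* out,
                         const std::vector<uint32_t>& positions) {
  uint32_t last = 0;
  for (uint32_t pos : positions) {
    WriteI32V(out, static_cast<int32_t>(pos - last));
    last = pos;
  }
}
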
-void WasmFunctionBuilder::SetAsmFunctionStartPosition(int position) {
+void WasmFunctionBuilder::SetAsmFunctionStartPosition(
+ size_t function_position) {
DCHECK_EQ(0, asm_func_start_source_position_);
- DCHECK_LE(0, position);
+ DCHECK_GE(std::numeric_limits<uint32_t>::max(), function_position);
+ uint32_t function_position_u32 = static_cast<uint32_t>(function_position);
// Must be called before emitting any asm.js source position.
DCHECK_EQ(0, asm_offsets_.size());
- asm_func_start_source_position_ = position;
- last_asm_source_position_ = position;
+ asm_func_start_source_position_ = function_position_u32;
+ last_asm_source_position_ = function_position_u32;
}
void WasmFunctionBuilder::DeleteCodeAfter(size_t position) {
@@ -339,7 +343,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
buffer.write_size(signatures_.size());
for (FunctionSig* sig : signatures_) {
- buffer.write_u8(kWasmFunctionTypeForm);
+ buffer.write_u8(kWasmFunctionTypeCode);
buffer.write_size(sig->parameter_count());
for (auto param : sig->parameters()) {
buffer.write_u8(WasmOpcodes::ValueTypeCodeFor(param));
@@ -388,7 +392,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
if (indirect_functions_.size() > 0) {
size_t start = EmitSection(kTableSectionCode, buffer);
buffer.write_u8(1); // table count
- buffer.write_u8(kWasmAnyFunctionTypeForm);
+ buffer.write_u8(kWasmAnyFunctionTypeCode);
buffer.write_u8(kHasMaximumFlag);
buffer.write_size(indirect_functions_.size());
buffer.write_size(indirect_functions_.size());
@@ -550,7 +554,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
buffer.write_size(4);
buffer.write(reinterpret_cast<const byte*>("name"), 4);
// Emit a subsection for the function names.
- buffer.write_u8(NameSectionType::kFunction);
+ buffer.write_u8(NameSectionKindCode::kFunction);
// Emit a placeholder for the subsection length.
size_t functions_start = buffer.reserve_u32v();
// Emit the function names.
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index 898f996cd3..0beae76513 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -172,8 +172,8 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
void EmitWithU32V(WasmOpcode opcode, uint32_t immediate);
void EmitDirectCallIndex(uint32_t index);
void SetName(Vector<const char> name);
- void AddAsmWasmOffset(int call_position, int to_number_position);
- void SetAsmFunctionStartPosition(int position);
+ void AddAsmWasmOffset(size_t call_position, size_t to_number_position);
+ void SetAsmFunctionStartPosition(size_t function_position);
size_t GetPosition() const { return body_.size(); }
void FixupByte(size_t position, byte value) {
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index bfeeb0fbff..b6b9117ae5 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -20,8 +20,8 @@
#include "src/wasm/compilation-manager.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-code-specialization.h"
-#include "src/wasm/wasm-heap.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -31,21 +31,6 @@ namespace v8 {
namespace internal {
namespace wasm {
-#define TRACE(...) \
- do { \
- if (FLAG_trace_wasm_instances) PrintF(__VA_ARGS__); \
- } while (false)
-
-#define TRACE_CHAIN(instance) \
- do { \
- instance->PrintInstancesChain(); \
- } while (false)
-
-#define TRACE_COMPILE(...) \
- do { \
- if (FLAG_trace_wasm_compiler) PrintF(__VA_ARGS__); \
- } while (false)
-
// static
const WasmExceptionSig WasmException::empty_sig_(0, 0, nullptr);
@@ -109,8 +94,8 @@ void UnpackAndRegisterProtectedInstructionsGC(Isolate* isolate,
}
}
-void UnpackAndRegisterProtectedInstructions(Isolate* isolate,
- wasm::NativeModule* native_module) {
+void UnpackAndRegisterProtectedInstructions(
+ Isolate* isolate, const wasm::NativeModule* native_module) {
DisallowHeapAllocation no_gc;
for (uint32_t i = native_module->num_imported_functions(),
@@ -118,7 +103,7 @@ void UnpackAndRegisterProtectedInstructions(Isolate* isolate,
i < e; ++i) {
wasm::WasmCode* code = native_module->GetCode(i);
- if (code == nullptr || code->kind() != wasm::WasmCode::Function) {
+ if (code == nullptr || code->kind() != wasm::WasmCode::kFunction) {
continue;
}
@@ -177,7 +162,7 @@ Handle<Object> GetOrCreateIndirectCallWrapper(
reinterpret_cast<Address>(owning_instance->wasm_context()->get());
if (!wasm_code.IsCodeObject()) {
DCHECK_NE(wasm_code.GetWasmCode()->kind(),
- wasm::WasmCode::WasmToWasmWrapper);
+ wasm::WasmCode::kWasmToWasmWrapper);
wasm::NativeModule* native_module = wasm_code.GetWasmCode()->owner();
// The only reason we pass owning_instance is for the GC case. Check
// that the values match.
@@ -203,30 +188,6 @@ Handle<Object> GetOrCreateIndirectCallWrapper(
return code;
}
-void UpdateDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
- int index, WasmFunction* function,
- Handle<Object> code_or_foreign) {
- DCHECK_EQ(0, dispatch_tables->length() % 4);
- for (int i = 0; i < dispatch_tables->length(); i += 4) {
- Handle<FixedArray> function_table(
- FixedArray::cast(dispatch_tables->get(i + 2)), isolate);
- Handle<FixedArray> signature_table(
- FixedArray::cast(dispatch_tables->get(i + 3)), isolate);
- if (function) {
- Handle<WasmInstanceObject> instance(
- WasmInstanceObject::cast(dispatch_tables->get(i)), isolate);
- // Note that {SignatureMap::Find} may return {-1} if the signature is
- // not found; it will simply never match any check.
- auto sig_index = instance->module()->signature_map.Find(function->sig);
- signature_table->set(index, Smi::FromInt(sig_index));
- function_table->set(index, *code_or_foreign);
- } else {
- signature_table->set(index, Smi::FromInt(-1));
- function_table->set(index, Smi::kZero);
- }
- }
-}
-
bool IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
// TODO(wasm): Once wasm has its own CSP policy, we should introduce a
// separate callback that includes information about the module about to be
@@ -246,8 +207,8 @@ bool IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
Handle<JSArray> GetImports(Isolate* isolate,
Handle<WasmModuleObject> module_object) {
- Handle<WasmCompiledModule> compiled_module(module_object->compiled_module(),
- isolate);
+ Handle<WasmSharedModuleData> shared(
+ module_object->compiled_module()->shared(), isolate);
Factory* factory = isolate->factory();
Handle<String> module_string = factory->InternalizeUtf8String("module");
@@ -260,7 +221,7 @@ Handle<JSArray> GetImports(Isolate* isolate,
Handle<String> global_string = factory->InternalizeUtf8String("global");
// Create the result array.
- WasmModule* module = compiled_module->module();
+ WasmModule* module = shared->module();
int num_imports = static_cast<int>(module->import_table.size());
Handle<JSArray> array_object = factory->NewJSArray(PACKED_ELEMENTS, 0, 0);
Handle<FixedArray> storage = factory->NewFixedArray(num_imports);
@@ -295,12 +256,12 @@ Handle<JSArray> GetImports(Isolate* isolate,
}
MaybeHandle<String> import_module =
- WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, compiled_module, import.module_name);
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
+ isolate, shared, import.module_name);
MaybeHandle<String> import_name =
- WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, compiled_module, import.field_name);
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
+ isolate, shared, import.field_name);
JSObject::AddProperty(entry, module_string, import_module.ToHandleChecked(),
NONE);
@@ -316,8 +277,8 @@ Handle<JSArray> GetImports(Isolate* isolate,
Handle<JSArray> GetExports(Isolate* isolate,
Handle<WasmModuleObject> module_object) {
- Handle<WasmCompiledModule> compiled_module(module_object->compiled_module(),
- isolate);
+ Handle<WasmSharedModuleData> shared(
+ module_object->compiled_module()->shared(), isolate);
Factory* factory = isolate->factory();
Handle<String> name_string = factory->InternalizeUtf8String("name");
@@ -329,7 +290,7 @@ Handle<JSArray> GetExports(Isolate* isolate,
Handle<String> global_string = factory->InternalizeUtf8String("global");
// Create the result array.
- WasmModule* module = compiled_module->module();
+ WasmModule* module = shared->module();
int num_exports = static_cast<int>(module->export_table.size());
Handle<JSArray> array_object = factory->NewJSArray(PACKED_ELEMENTS, 0, 0);
Handle<FixedArray> storage = factory->NewFixedArray(num_exports);
@@ -364,8 +325,8 @@ Handle<JSArray> GetExports(Isolate* isolate,
Handle<JSObject> entry = factory->NewJSObject(object_function);
MaybeHandle<String> export_name =
- WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, compiled_module, exp.name);
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(isolate, shared,
+ exp.name);
JSObject::AddProperty(entry, name_string, export_name.ToHandleChecked(),
NONE);
@@ -380,15 +341,14 @@ Handle<JSArray> GetExports(Isolate* isolate,
Handle<JSArray> GetCustomSections(Isolate* isolate,
Handle<WasmModuleObject> module_object,
Handle<String> name, ErrorThrower* thrower) {
- Handle<WasmCompiledModule> compiled_module(module_object->compiled_module(),
- isolate);
+ Handle<WasmSharedModuleData> shared(
+ module_object->compiled_module()->shared(), isolate);
Factory* factory = isolate->factory();
std::vector<CustomSectionOffset> custom_sections;
{
DisallowHeapAllocation no_gc; // for raw access to string bytes.
- Handle<SeqOneByteString> module_bytes(compiled_module->module_bytes(),
- isolate);
+ Handle<SeqOneByteString> module_bytes(shared->module_bytes(), isolate);
const byte* start =
reinterpret_cast<const byte*>(module_bytes->GetCharsAddress());
const byte* end = start + module_bytes->length();
@@ -400,8 +360,8 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
// Gather matching sections.
for (auto& section : custom_sections) {
MaybeHandle<String> section_name =
- WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, compiled_module, section.name);
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(isolate, shared,
+ section.name);
if (!name->Equals(*section_name.ToHandleChecked())) continue;
@@ -419,8 +379,7 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
JSArrayBuffer::Setup(buffer, isolate, is_external, memory, size, memory,
size);
DisallowHeapAllocation no_gc; // for raw access to string bytes.
- Handle<SeqOneByteString> module_bytes(compiled_module->module_bytes(),
- isolate);
+ Handle<SeqOneByteString> module_bytes(shared->module_bytes(), isolate);
const byte* start =
reinterpret_cast<const byte*>(module_bytes->GetCharsAddress());
memcpy(memory, start + section.payload.offset(), section.payload.length());
@@ -441,9 +400,9 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
return array_object;
}
-Handle<FixedArray> DecodeLocalNames(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
- Handle<SeqOneByteString> wire_bytes(compiled_module->module_bytes(), isolate);
+Handle<FixedArray> DecodeLocalNames(Isolate* isolate,
+ Handle<WasmSharedModuleData> shared) {
+ Handle<SeqOneByteString> wire_bytes(shared->module_bytes(), isolate);
LocalNames decoded_locals;
{
DisallowHeapAllocation no_gc;
@@ -459,33 +418,14 @@ Handle<FixedArray> DecodeLocalNames(
locals_names->set(func.function_index, *func_locals_names);
for (LocalName& name : func.names) {
Handle<String> name_str =
- WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, compiled_module, name.name)
+ WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
+ isolate, shared, name.name)
.ToHandleChecked();
func_locals_names->set(name.local_index, *name_str);
}
}
return locals_names;
}
-
-const char* ExternalKindName(WasmExternalKind kind) {
- switch (kind) {
- case kExternalFunction:
- return "function";
- case kExternalTable:
- return "table";
- case kExternalMemory:
- return "memory";
- case kExternalGlobal:
- return "global";
- }
- return "unknown";
-}
-
-#undef TRACE
-#undef TRACE_CHAIN
-#undef TRACE_COMPILE
-
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index e44ca995b0..492c51487f 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -15,18 +15,18 @@
#include "src/wasm/decoder.h"
#include "src/wasm/signature-map.h"
-#include "src/wasm/wasm-heap.h"
-#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-constants.h"
namespace v8 {
namespace internal {
class WasmCompiledModule;
class WasmDebugInfo;
-class WasmModuleObject;
class WasmInstanceObject;
-class WasmTableObject;
class WasmMemoryObject;
+class WasmModuleObject;
+class WasmSharedModuleData;
+class WasmTableObject;
namespace compiler {
class CallDescriptor;
@@ -34,13 +34,7 @@ class CallDescriptor;
namespace wasm {
class ErrorThrower;
-
-enum WasmExternalKind {
- kExternalFunction = 0,
- kExternalTable = 1,
- kExternalMemory = 2,
- kExternalGlobal = 3
-};
+class NativeModule;
// Static representation of a wasm function.
struct WasmFunction {
@@ -117,14 +111,14 @@ struct WasmTableInit {
struct WasmImport {
WireBytesRef module_name; // module name.
WireBytesRef field_name; // import name.
- WasmExternalKind kind; // kind of the import.
+ ImportExportKindCode kind; // kind of the import.
uint32_t index; // index into the respective space.
};
// Static representation of a wasm export.
struct WasmExport {
WireBytesRef name; // exported name.
- WasmExternalKind kind; // kind of the export.
+ ImportExportKindCode kind; // kind of the export.
uint32_t index; // index into the respective space.
};
@@ -136,11 +130,6 @@ struct ModuleWireBytes;
struct V8_EXPORT_PRIVATE WasmModule {
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmModule);
- static const uint32_t kPageSize = 0x10000; // Page size, 64kb.
- static const uint32_t kMinMemPages = 1; // Minimum memory size = 64kb
-
- static constexpr int kInvalidExceptionTag = -1;
-
std::unique_ptr<Zone> signature_zone;
uint32_t initial_pages = 0; // initial size of the memory in 64k pages
uint32_t maximum_pages = 0; // maximum size of the memory in 64k pages
@@ -247,7 +236,7 @@ struct WasmFunctionName {
: function_(function), name_(name) {}
const WasmFunction* function_;
- WasmName name_;
+ const WasmName name_;
};
std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name);
@@ -275,7 +264,7 @@ V8_EXPORT_PRIVATE Handle<JSArray> GetCustomSections(
// Decode local variable names from the names section. Return FixedArray of
// FixedArray of <undefined|String>. The outer fixed array is indexed by the
// function index, the inner one by the local index.
-Handle<FixedArray> DecodeLocalNames(Isolate*, Handle<WasmCompiledModule>);
+Handle<FixedArray> DecodeLocalNames(Isolate*, Handle<WasmSharedModuleData>);
// If the target is an export wrapper, return the {WasmFunction*} corresponding
// to the wrapped wasm function; in all other cases, return nullptr.
@@ -284,10 +273,6 @@ Handle<FixedArray> DecodeLocalNames(Isolate*, Handle<WasmCompiledModule>);
// TODO(titzer): move this to WasmExportedFunction.
WasmFunction* GetWasmFunctionForExport(Isolate* isolate, Handle<Object> target);
-void UpdateDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
- int index, WasmFunction* function,
- Handle<Object> code_or_foreign);
-
Handle<Object> GetOrCreateIndirectCallWrapper(
Isolate* isolate, Handle<WasmInstanceObject> owning_instance,
WasmCodeWrapper wasm_code, uint32_t index, FunctionSig* sig);
@@ -295,10 +280,8 @@ Handle<Object> GetOrCreateIndirectCallWrapper(
void UnpackAndRegisterProtectedInstructionsGC(Isolate* isolate,
Handle<FixedArray> code_table);
-void UnpackAndRegisterProtectedInstructions(Isolate* isolate,
- wasm::NativeModule* native_module);
-
-const char* ExternalKindName(WasmExternalKind);
+void UnpackAndRegisterProtectedInstructions(
+ Isolate* isolate, const wasm::NativeModule* native_module);
// TruncatedUserString makes it easy to output names up to a certain length, and
// output a truncation followed by '...' if they exceed a limit.
@@ -332,7 +315,7 @@ class TruncatedUserString {
private:
const char* start_;
- int length_;
+ const int length_;
char buffer_[kMaxLen];
};
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 27f7d68d17..0a85862174 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -56,8 +56,6 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, table_object, WasmTableObject,
kTableObjectOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, function_tables, FixedArray,
kFunctionTablesOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, signature_tables, FixedArray,
- kSignatureTablesOffset)
ACCESSORS(WasmInstanceObject, directly_called_instances, FixedArray,
kDirectlyCalledInstancesOffset)
ACCESSORS(WasmInstanceObject, js_imports_table, FixedArray,
@@ -83,31 +81,14 @@ OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entry_map, Managed<wasm::SignatureMap>,
#undef OPTIONAL_ACCESSORS
-#define FORWARD_SHARED(type, name) \
- type WasmCompiledModule::name() { return shared()->name(); }
-FORWARD_SHARED(SeqOneByteString*, module_bytes)
-FORWARD_SHARED(wasm::WasmModule*, module)
-FORWARD_SHARED(Script*, script)
-FORWARD_SHARED(bool, is_asm_js)
-#undef FORWARD_SHARED
-
#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID, TYPE_CHECK, SETTER_MODIFIER) \
- Handle<TYPE> WasmCompiledModule::NAME() const { \
- return handle(ptr_to_##NAME()); \
- } \
- \
- MaybeHandle<TYPE> WasmCompiledModule::maybe_##NAME() const { \
- if (has_##NAME()) return NAME(); \
- return MaybeHandle<TYPE>(); \
- } \
- \
- TYPE* WasmCompiledModule::maybe_ptr_to_##NAME() const { \
+ TYPE* WasmCompiledModule::maybe_##NAME() const { \
Object* obj = get(ID); \
if (!(TYPE_CHECK)) return nullptr; \
return TYPE::cast(obj); \
} \
\
- TYPE* WasmCompiledModule::ptr_to_##NAME() const { \
+ TYPE* WasmCompiledModule::NAME() const { \
Object* obj = get(ID); \
DCHECK(TYPE_CHECK); \
return TYPE::cast(obj); \
@@ -120,10 +101,7 @@ FORWARD_SHARED(bool, is_asm_js)
\
void WasmCompiledModule::reset_##NAME() { set_undefined(ID); } \
\
- void WasmCompiledModule::set_##NAME(Handle<TYPE> value) { \
- set_ptr_to_##NAME(*value); \
- } \
- void WasmCompiledModule::set_ptr_to_##NAME(TYPE* value) { set(ID, value); }
+ void WasmCompiledModule::set_##NAME(TYPE* value) { set(ID, value); }
#define WCM_OBJECT(TYPE, NAME) \
WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, obj->Is##TYPE(), public)
@@ -147,8 +125,9 @@ FORWARD_SHARED(bool, is_asm_js)
WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME, obj->IsWeakCell(), \
public) \
\
- Handle<TYPE> WasmCompiledModule::NAME() const { \
- return handle(TYPE::cast(weak_##NAME()->value())); \
+ TYPE* WasmCompiledModule::NAME() const { \
+ DCHECK(!weak_##NAME()->cleared()); \
+ return TYPE::cast(weak_##NAME()->value()); \
}
#define DEFINITION(KIND, TYPE, NAME) WCM_##KIND(TYPE, NAME)
@@ -171,7 +150,7 @@ bool WasmMemoryObject::has_maximum_pages() { return maximum_pages() >= 0; }
void WasmCompiledModule::ReplaceCodeTableForTesting(
Handle<FixedArray> testing_table) {
- set_code_table(testing_table);
+ set_code_table(*testing_table);
}
#include "src/objects/object-macros-undef.h"
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 565f38a9e7..c92a51716a 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -14,7 +14,9 @@
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-code-specialization.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -45,7 +47,9 @@ class CompiledModulesIterator
Handle<WasmCompiledModule> start_module, bool at_end)
: isolate_(isolate),
start_module_(start_module),
- current_(at_end ? Handle<WasmCompiledModule>::null() : start_module) {}
+ current_(
+ at_end ? Handle<WasmCompiledModule>::null()
+ : Handle<WasmCompiledModule>::New(*start_module, isolate)) {}
Handle<WasmCompiledModule> operator*() const {
DCHECK(!current_.is_null());
@@ -64,7 +68,7 @@ class CompiledModulesIterator
DCHECK(!current_.is_null());
if (!is_backwards_) {
if (current_->has_next_instance()) {
- current_ = current_->next_instance();
+ *current_.location() = current_->next_instance();
return;
}
// No more modules in next-links, now try the previous-links.
@@ -72,7 +76,7 @@ class CompiledModulesIterator
current_ = start_module_;
}
if (current_->has_prev_instance()) {
- current_ = current_->prev_instance();
+ *current_.location() = current_->prev_instance();
return;
}
current_ = Handle<WasmCompiledModule>::null();
@@ -118,7 +122,7 @@ class CompiledModuleInstancesIterator
bool NeedToAdvance() {
return !it.current_.is_null() &&
(!it.current_->has_weak_owning_instance() ||
- it.current_->ptr_to_weak_owning_instance()->cleared());
+ it.current_->weak_owning_instance()->cleared());
}
CompiledModulesIterator it;
};
@@ -131,14 +135,14 @@ iterate_compiled_module_instance_chain(
}
#ifdef DEBUG
-bool IsBreakablePosition(Handle<WasmCompiledModule> compiled_module,
- int func_index, int offset_in_func) {
+bool IsBreakablePosition(WasmSharedModuleData* shared, int func_index,
+ int offset_in_func) {
DisallowHeapAllocation no_gc;
AccountingAllocator alloc;
Zone tmp(&alloc, ZONE_NAME);
wasm::BodyLocalDecls locals(&tmp);
- const byte* module_start = compiled_module->module_bytes()->GetChars();
- WasmFunction& func = compiled_module->module()->functions[func_index];
+ const byte* module_start = shared->module_bytes()->GetChars();
+ WasmFunction& func = shared->module()->functions[func_index];
wasm::BytecodeIterator iterator(module_start + func.code.offset(),
module_start + func.code.end_offset(),
&locals);
@@ -159,6 +163,14 @@ void CompiledModuleFinalizer(const v8::WeakCallbackInfo<void>& data) {
GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
}
+enum DispatchTableElements : int {
+ kDispatchTableInstanceOffset,
+ kDispatchTableIndexOffset,
+ kDispatchTableFunctionTableOffset,
+ // Marker:
+ kDispatchTableNumElements
+};
+
} // namespace
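
Editor's note: the DispatchTableElements enum above replaces the bare "% 4" / "i + 2" arithmetic used before; with the separate signature-table slot gone, every dispatch-table entry now occupies three consecutive slots (instance, table index, function table) of one flat FixedArray. A toy illustration of that flat layout, with std types standing in for the tagged heap objects:

#include <cstddef>
#include <vector>

enum DispatchTableElements : int {
  kDispatchTableInstanceOffset,
  kDispatchTableIndexOffset,
  kDispatchTableFunctionTableOffset,
  kDispatchTableNumElements  // marker: slots per entry
};

struct Slot { int value; };  // stand-in for a tagged object in the FixedArray

size_t EntryCount(const std::vector<Slot>& flat) {
  return flat.size() / kDispatchTableNumElements;
}

int TableIndexOfEntry(const std::vector<Slot>& flat, size_t entry) {
  return flat[entry * kDispatchTableNumElements + kDispatchTableIndexOffset].value;
}
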
Handle<WasmModuleObject> WasmModuleObject::New(
@@ -170,7 +182,7 @@ Handle<WasmModuleObject> WasmModuleObject::New(
module_object->set_compiled_module(*compiled_module);
Handle<WeakCell> link_to_module =
isolate->factory()->NewWeakCell(module_object);
- compiled_module->set_weak_wasm_module(link_to_module);
+ compiled_module->set_weak_wasm_module(*link_to_module);
return module_object;
}
@@ -179,7 +191,7 @@ void WasmModuleObject::ValidateStateForTesting(
DisallowHeapAllocation no_gc;
WasmCompiledModule* compiled_module = module_obj->compiled_module();
CHECK(compiled_module->has_weak_wasm_module());
- CHECK_EQ(compiled_module->ptr_to_weak_wasm_module()->value(), *module_obj);
+ CHECK_EQ(compiled_module->weak_wasm_module()->value(), *module_obj);
CHECK(!compiled_module->has_prev_instance());
CHECK(!compiled_module->has_next_instance());
CHECK(!compiled_module->has_weak_owning_instance());
@@ -203,62 +215,61 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
Handle<Object> max = isolate->factory()->NewNumber(maximum);
table_obj->set_maximum_length(*max);
- Handle<FixedArray> dispatch_tables = isolate->factory()->NewFixedArray(0);
- table_obj->set_dispatch_tables(*dispatch_tables);
+ table_obj->set_dispatch_tables(isolate->heap()->empty_fixed_array());
return Handle<WasmTableObject>::cast(table_obj);
}
-Handle<FixedArray> WasmTableObject::AddDispatchTable(
- Isolate* isolate, Handle<WasmTableObject> table_obj,
- Handle<WasmInstanceObject> instance, int table_index,
- Handle<FixedArray> function_table, Handle<FixedArray> signature_table) {
+void WasmTableObject::AddDispatchTable(Isolate* isolate,
+ Handle<WasmTableObject> table_obj,
+ Handle<WasmInstanceObject> instance,
+ int table_index,
+ Handle<FixedArray> function_table) {
+ DCHECK_EQ(0, function_table->length() % compiler::kFunctionTableEntrySize);
Handle<FixedArray> dispatch_tables(table_obj->dispatch_tables());
- DCHECK_EQ(0, dispatch_tables->length() % 4);
+ int old_length = dispatch_tables->length();
+ DCHECK_EQ(0, old_length % kDispatchTableNumElements);
- if (instance.is_null()) return dispatch_tables;
+ if (instance.is_null()) return;
// TODO(titzer): use weak cells here to avoid leaking instances.
// Grow the dispatch table and add a new entry at the end.
Handle<FixedArray> new_dispatch_tables =
- isolate->factory()->CopyFixedArrayAndGrow(dispatch_tables, 4);
+ isolate->factory()->CopyFixedArrayAndGrow(dispatch_tables,
+ kDispatchTableNumElements);
- new_dispatch_tables->set(dispatch_tables->length() + 0, *instance);
- new_dispatch_tables->set(dispatch_tables->length() + 1,
+ new_dispatch_tables->set(old_length + kDispatchTableInstanceOffset,
+ *instance);
+ new_dispatch_tables->set(old_length + kDispatchTableIndexOffset,
Smi::FromInt(table_index));
- new_dispatch_tables->set(dispatch_tables->length() + 2, *function_table);
- new_dispatch_tables->set(dispatch_tables->length() + 3, *signature_table);
+ new_dispatch_tables->set(old_length + kDispatchTableFunctionTableOffset,
+ *function_table);
table_obj->set_dispatch_tables(*new_dispatch_tables);
-
- return new_dispatch_tables;
}
void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
// TODO(6792): No longer needed once WebAssembly code is off heap.
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
Handle<FixedArray> dispatch_tables(this->dispatch_tables());
- DCHECK_EQ(0, dispatch_tables->length() % 4);
+ DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
uint32_t old_size = functions()->length();
Zone specialization_zone(isolate->allocator(), ZONE_NAME);
- for (int i = 0; i < dispatch_tables->length(); i += 4) {
- Handle<FixedArray> old_function_table(
- FixedArray::cast(dispatch_tables->get(i + 2)));
- Handle<FixedArray> old_signature_table(
- FixedArray::cast(dispatch_tables->get(i + 3)));
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ Handle<FixedArray> old_function_table(FixedArray::cast(
+ dispatch_tables->get(i + kDispatchTableFunctionTableOffset)));
Handle<FixedArray> new_function_table = isolate->global_handles()->Create(
- *isolate->factory()->CopyFixedArrayAndGrow(old_function_table, count));
- Handle<FixedArray> new_signature_table = isolate->global_handles()->Create(
- *isolate->factory()->CopyFixedArrayAndGrow(old_signature_table, count));
+ *isolate->factory()->CopyFixedArrayAndGrow(
+ old_function_table, count * compiler::kFunctionTableEntrySize));
GlobalHandleAddress new_function_table_addr = new_function_table.address();
- GlobalHandleAddress new_signature_table_addr =
- new_signature_table.address();
- int table_index = Smi::cast(dispatch_tables->get(i + 1))->value();
- // Update dispatch tables with new function/signature tables
- dispatch_tables->set(i + 2, *new_function_table);
- dispatch_tables->set(i + 3, *new_signature_table);
+ int table_index =
+ Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset))->value();
+ // Update dispatch tables with new function tables.
+ dispatch_tables->set(i + kDispatchTableFunctionTableOffset,
+ *new_function_table);
// Patch the code of the respective instance.
if (FLAG_wasm_jit_to_native) {
@@ -269,18 +280,15 @@ void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
WasmInstanceObject::cast(dispatch_tables->get(i));
WasmCompiledModule* compiled_module = instance->compiled_module();
wasm::NativeModule* native_module = compiled_module->GetNativeModule();
+ wasm::NativeModuleModificationScope native_module_modification_scope(
+ native_module);
GlobalHandleAddress old_function_table_addr =
native_module->function_tables()[table_index];
- GlobalHandleAddress old_signature_table_addr =
- native_module->signature_tables()[table_index];
code_specialization.PatchTableSize(old_size, old_size + count);
code_specialization.RelocatePointer(old_function_table_addr,
new_function_table_addr);
- code_specialization.RelocatePointer(old_signature_table_addr,
- new_signature_table_addr);
code_specialization.ApplyToWholeInstance(instance);
native_module->function_tables()[table_index] = new_function_table_addr;
- native_module->signature_tables()[table_index] = new_signature_table_addr;
} else {
DisallowHeapAllocation no_gc;
wasm::CodeSpecialization code_specialization(isolate,
@@ -289,23 +297,15 @@ void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
WasmInstanceObject::cast(dispatch_tables->get(i));
WasmCompiledModule* compiled_module = instance->compiled_module();
GlobalHandleAddress old_function_table_addr =
- WasmCompiledModule::GetTableValue(
- compiled_module->ptr_to_function_tables(), table_index);
- GlobalHandleAddress old_signature_table_addr =
- WasmCompiledModule::GetTableValue(
- compiled_module->ptr_to_signature_tables(), table_index);
+ WasmCompiledModule::GetTableValue(compiled_module->function_tables(),
+ table_index);
code_specialization.PatchTableSize(old_size, old_size + count);
code_specialization.RelocatePointer(old_function_table_addr,
new_function_table_addr);
- code_specialization.RelocatePointer(old_signature_table_addr,
- new_signature_table_addr);
code_specialization.ApplyToWholeInstance(instance);
- WasmCompiledModule::UpdateTableValue(
- compiled_module->ptr_to_function_tables(), table_index,
- new_function_table_addr);
- WasmCompiledModule::UpdateTableValue(
- compiled_module->ptr_to_signature_tables(), table_index,
- new_signature_table_addr);
+ WasmCompiledModule::UpdateTableValue(compiled_module->function_tables(),
+ table_index,
+ new_function_table_addr);
}
}
}
@@ -316,34 +316,69 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
- WasmFunction* wasm_function = nullptr;
+ wasm::FunctionSig* sig = nullptr;
Handle<Object> code = Handle<Object>::null();
Handle<Object> value = isolate->factory()->null_value();
if (!function.is_null()) {
auto exported_function = Handle<WasmExportedFunction>::cast(function);
- wasm_function = wasm::GetWasmFunctionForExport(isolate, function);
+ auto* wasm_function = wasm::GetWasmFunctionForExport(isolate, function);
// The verification that {function} is an export was done
// by the caller.
- DCHECK_NOT_NULL(wasm_function);
+ DCHECK(wasm_function != nullptr && wasm_function->sig != nullptr);
+ sig = wasm_function->sig;
value = function;
// TODO(titzer): Make JSToWasm wrappers just call the WASM to WASM wrapper,
// and then we can just reuse the WASM to WASM wrapper.
WasmCodeWrapper wasm_code = exported_function->GetWasmCode();
+ wasm::NativeModule* native_module =
+ wasm_code.IsCodeObject() ? nullptr : wasm_code.GetWasmCode()->owner();
CodeSpaceMemoryModificationScope gc_modification_scope(isolate->heap());
+ wasm::NativeModuleModificationScope native_modification_scope(
+ native_module);
code = wasm::GetOrCreateIndirectCallWrapper(
isolate, handle(exported_function->instance()), wasm_code,
- exported_function->function_index(), wasm_function->sig);
+ exported_function->function_index(), sig);
}
- UpdateDispatchTables(isolate, dispatch_tables, index, wasm_function, code);
+ UpdateDispatchTables(table, index, sig, code);
array->set(index, *value);
}
+void WasmTableObject::UpdateDispatchTables(Handle<WasmTableObject> table,
+ int index, wasm::FunctionSig* sig,
+ Handle<Object> code_or_foreign) {
+ DisallowHeapAllocation no_gc;
+ FixedArray* dispatch_tables = table->dispatch_tables();
+ DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ FixedArray* function_table = FixedArray::cast(
+ dispatch_tables->get(i + kDispatchTableFunctionTableOffset));
+ Smi* sig_smi = Smi::FromInt(-1);
+ Object* code = Smi::kZero;
+ if (sig) {
+ DCHECK(code_or_foreign->IsCode() || code_or_foreign->IsForeign());
+ WasmInstanceObject* instance = WasmInstanceObject::cast(
+ dispatch_tables->get(i + kDispatchTableInstanceOffset));
+ // Note that {SignatureMap::Find} may return {-1} if the signature is
+ // not found; it will simply never match any check.
+ auto sig_index = instance->module()->signature_map.Find(sig);
+ sig_smi = Smi::FromInt(sig_index);
+ code = *code_or_foreign;
+ } else {
+ DCHECK(code_or_foreign.is_null());
+ }
+ function_table->set(compiler::FunctionTableSigOffset(index), sig_smi);
+ function_table->set(compiler::FunctionTableCodeOffset(index), code);
+ }
+}
+
namespace {
Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
Handle<JSArrayBuffer> old_buffer,
- uint32_t pages, uint32_t maximum_pages) {
+ uint32_t pages, uint32_t maximum_pages,
+ bool use_trap_handler) {
if (!old_buffer->is_growable()) return Handle<JSArrayBuffer>::null();
Address old_mem_start = nullptr;
uint32_t old_size = 0;
@@ -351,38 +386,64 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
old_mem_start = static_cast<Address>(old_buffer->backing_store());
CHECK(old_buffer->byte_length()->ToUint32(&old_size));
}
- DCHECK_EQ(0, old_size % WasmModule::kPageSize);
- uint32_t old_pages = old_size / WasmModule::kPageSize;
+ DCHECK_EQ(0, old_size % wasm::kWasmPageSize);
+ uint32_t old_pages = old_size / wasm::kWasmPageSize;
DCHECK_GE(std::numeric_limits<uint32_t>::max(),
- old_size + pages * WasmModule::kPageSize);
+ old_size + pages * wasm::kWasmPageSize);
if (old_pages > maximum_pages || pages > maximum_pages - old_pages) {
return Handle<JSArrayBuffer>::null();
}
- const bool enable_guard_regions = old_buffer.is_null()
- ? trap_handler::UseTrapHandler()
- : old_buffer->has_guard_region();
+ const bool enable_guard_regions =
+ old_buffer.is_null() ? use_trap_handler : old_buffer->has_guard_region();
size_t new_size =
- static_cast<size_t>(old_pages + pages) * WasmModule::kPageSize;
- if (enable_guard_regions && old_size != 0) {
+ static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize;
+ if (new_size > FLAG_wasm_max_mem_pages * wasm::kWasmPageSize ||
+ new_size > kMaxInt) {
+ return Handle<JSArrayBuffer>::null();
+ }
+ if ((enable_guard_regions || old_size == new_size) && old_size != 0) {
DCHECK_NOT_NULL(old_buffer->backing_store());
- if (new_size > FLAG_wasm_max_mem_pages * WasmModule::kPageSize ||
- new_size > kMaxInt) {
- return Handle<JSArrayBuffer>::null();
+ if (old_size != new_size) {
+ isolate->array_buffer_allocator()->SetProtection(
+ old_mem_start, new_size,
+ v8::ArrayBuffer::Allocator::Protection::kReadWrite);
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(pages * wasm::kWasmPageSize);
}
- isolate->array_buffer_allocator()->SetProtection(
- old_mem_start, new_size,
- v8::ArrayBuffer::Allocator::Protection::kReadWrite);
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(pages * WasmModule::kPageSize);
- Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(new_size);
- old_buffer->set_byte_length(*length_obj);
- return old_buffer;
+ // NOTE: We must allocate a new array buffer here because the spec
+ // assumes that ArrayBuffers do not change size.
+ void* allocation_base = old_buffer->allocation_base();
+ size_t allocation_length = old_buffer->allocation_length();
+ void* backing_store = old_buffer->backing_store();
+ bool has_guard_region = old_buffer->has_guard_region();
+ bool is_external = old_buffer->is_external();
+ // Disconnect buffer early so GC won't free it.
+ i::wasm::DetachMemoryBuffer(isolate, old_buffer, false);
+ Handle<JSArrayBuffer> new_buffer = wasm::SetupArrayBuffer(
+ isolate, allocation_base, allocation_length, backing_store, new_size,
+ is_external, has_guard_region);
+ return new_buffer;
} else {
+ bool free_memory = false;
Handle<JSArrayBuffer> new_buffer;
- new_buffer = wasm::NewArrayBuffer(isolate, new_size, enable_guard_regions);
- if (new_buffer.is_null() || old_size == 0) return new_buffer;
- Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
- memcpy(new_mem_start, old_mem_start, old_size);
+ if (pages != 0) {
+ // Allocate a new buffer and memcpy the old contents.
+ free_memory = true;
+ new_buffer =
+ wasm::NewArrayBuffer(isolate, new_size, enable_guard_regions);
+ if (new_buffer.is_null() || old_size == 0) return new_buffer;
+ Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
+ memcpy(new_mem_start, old_mem_start, old_size);
+ DCHECK(old_buffer.is_null() || !old_buffer->is_shared());
+ DCHECK(old_buffer.is_null() || !old_buffer->has_guard_region());
+ } else {
+ // Reuse the prior backing store, but allocate a new array buffer.
+ new_buffer = wasm::SetupArrayBuffer(
+ isolate, old_buffer->allocation_base(),
+ old_buffer->allocation_length(), old_buffer->backing_store(),
+ new_size, old_buffer->is_external(), old_buffer->has_guard_region());
+ }
+ i::wasm::DetachMemoryBuffer(isolate, old_buffer, free_memory);
return new_buffer;
}
}
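
Editor's note: GrowMemoryBuffer now always hands back a new JSArrayBuffer, either wrapping the same backing store or freshly copied storage, because the spec assumes ArrayBuffers do not change size; the old buffer is detached rather than resized. A minimal sketch of the "copy and detach" branch using plain C++ ownership, not the JSArrayBuffer/backing-store machinery:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>

struct Buffer {
  std::unique_ptr<uint8_t[]> data;
  size_t size = 0;
};

// Grow by allocating fresh storage and copying, then "detach" the old buffer
// so stale references cannot observe the new size.
Buffer GrowByCopy(Buffer* old_buffer, size_t delta_bytes) {
  Buffer grown;
  grown.size = old_buffer->size + delta_bytes;
  grown.data.reset(new uint8_t[grown.size]());
  if (old_buffer->size != 0) {
    std::memcpy(grown.data.get(), old_buffer->data.get(), old_buffer->size);
  }
  old_buffer->data.reset();  // detach: keeps its identity, loses its storage
  old_buffer->size = 0;
  return grown;
}
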
@@ -397,7 +458,7 @@ void SetInstanceMemory(Isolate* isolate, Handle<WasmInstanceObject> instance,
// To flush out bugs earlier, in DEBUG mode, check that all pages of the
// memory are accessible by reading and writing one byte on each page.
for (uint32_t offset = 0; offset < wasm_context->mem_size;
- offset += WasmModule::kPageSize) {
+ offset += wasm::kWasmPageSize) {
byte val = wasm_context->mem_start[offset];
wasm_context->mem_start[offset] = val;
}
@@ -409,6 +470,10 @@ void SetInstanceMemory(Isolate* isolate, Handle<WasmInstanceObject> instance,
Handle<WasmMemoryObject> WasmMemoryObject::New(
Isolate* isolate, MaybeHandle<JSArrayBuffer> maybe_buffer,
int32_t maximum) {
+ // TODO(kschimpf): Do we need to add an argument that defines the
+ // style of memory the user prefers (with/without trap handling), so
+ // that the memory will match the style of the compiled wasm module.
+ // See issue v8:7143
Handle<JSFunction> memory_ctor(
isolate->native_context()->wasm_memory_constructor());
auto memory_obj = Handle<WasmMemoryObject>::cast(
@@ -417,8 +482,11 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(
Handle<JSArrayBuffer> buffer;
if (maybe_buffer.is_null()) {
// If no buffer was provided, create a 0-length one.
+
+ // TODO(kschimpf): Modify to use argument defining style of
+ // memory. (see above).
buffer = wasm::SetupArrayBuffer(isolate, nullptr, 0, nullptr, 0, false,
- trap_handler::UseTrapHandler());
+ trap_handler::IsTrapHandlerEnabled());
} else {
buffer = maybe_buffer.ToHandleChecked();
// Paranoid check that the buffer size makes sense.
@@ -434,7 +502,7 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(
uint32_t WasmMemoryObject::current_pages() {
uint32_t byte_length;
CHECK(array_buffer()->byte_length()->ToUint32(&byte_length));
- return byte_length / WasmModule::kPageSize;
+ return byte_length / wasm::kWasmPageSize;
}
void WasmMemoryObject::AddInstance(Isolate* isolate,
@@ -459,32 +527,6 @@ void WasmMemoryObject::RemoveInstance(Isolate* isolate,
}
}
-void WasmMemoryObject::SetupNewBufferWithSameBackingStore(
- Isolate* isolate, Handle<WasmMemoryObject> memory_object, uint32_t size) {
- // In case of Memory.Grow(0), or Memory.Grow(delta) with guard pages enabled,
- // Setup a new buffer, update memory object, and instances associated with the
- // memory object, as the current buffer will be detached.
- Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer());
- Handle<JSArrayBuffer> new_buffer;
-
- constexpr bool is_external = false;
- new_buffer = wasm::SetupArrayBuffer(
- isolate, old_buffer->allocation_base(), old_buffer->allocation_length(),
- old_buffer->backing_store(), size * WasmModule::kPageSize, is_external,
- old_buffer->has_guard_region());
- if (memory_object->has_instances()) {
- Handle<WeakFixedArray> instances(memory_object->instances(), isolate);
- for (int i = 0; i < instances->Length(); i++) {
- Object* elem = instances->Get(i);
- if (!elem->IsWasmInstanceObject()) continue;
- Handle<WasmInstanceObject> instance(WasmInstanceObject::cast(elem),
- isolate);
- SetInstanceMemory(isolate, instance, new_buffer);
- }
- }
- memory_object->set_array_buffer(*new_buffer);
-}
-
// static
int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<WasmMemoryObject> memory_object,
@@ -493,17 +535,18 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
if (!old_buffer->is_growable()) return -1;
uint32_t old_size = 0;
CHECK(old_buffer->byte_length()->ToUint32(&old_size));
- DCHECK_EQ(0, old_size % WasmModule::kPageSize);
+ DCHECK_EQ(0, old_size % wasm::kWasmPageSize);
Handle<JSArrayBuffer> new_buffer;
- // Return current size if grow by 0.
- if (pages == 0) return old_size / WasmModule::kPageSize;
uint32_t maximum_pages = FLAG_wasm_max_mem_pages;
if (memory_object->has_maximum_pages()) {
maximum_pages = Min(FLAG_wasm_max_mem_pages,
static_cast<uint32_t>(memory_object->maximum_pages()));
}
- new_buffer = GrowMemoryBuffer(isolate, old_buffer, pages, maximum_pages);
+ // TODO(kschimpf): We need to fix this by adding a field to WasmMemoryObject
+ // that defines the style of memory being used.
+ new_buffer = GrowMemoryBuffer(isolate, old_buffer, pages, maximum_pages,
+ trap_handler::IsTrapHandlerEnabled());
if (new_buffer.is_null()) return -1;
if (memory_object->has_instances()) {
@@ -517,14 +560,16 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
}
}
memory_object->set_array_buffer(*new_buffer);
- return old_size / WasmModule::kPageSize;
+ return old_size / wasm::kWasmPageSize;
}
WasmModuleObject* WasmInstanceObject::module_object() {
- return *compiled_module()->wasm_module();
+ return compiled_module()->wasm_module();
}
-WasmModule* WasmInstanceObject::module() { return compiled_module()->module(); }
+WasmModule* WasmInstanceObject::module() {
+ return compiled_module()->shared()->module();
+}
Handle<WasmDebugInfo> WasmInstanceObject::GetOrCreateDebugInfo(
Handle<WasmInstanceObject> instance) {
@@ -553,46 +598,21 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
return instance;
}
-int32_t WasmInstanceObject::GetMemorySize() {
- if (!has_memory_object()) return 0;
- uint32_t bytes = memory_object()->array_buffer()->byte_length()->Number();
- DCHECK_EQ(0, bytes % WasmModule::kPageSize);
- return bytes / WasmModule::kPageSize;
-}
-
int32_t WasmInstanceObject::GrowMemory(Isolate* isolate,
Handle<WasmInstanceObject> instance,
uint32_t pages) {
- if (pages == 0) return instance->GetMemorySize();
DCHECK(instance->has_memory_object());
return WasmMemoryObject::Grow(
isolate, handle(instance->memory_object(), isolate), pages);
}
-uint32_t WasmInstanceObject::GetMaxMemoryPages() {
- if (has_memory_object()) {
- if (memory_object()->has_maximum_pages()) {
- uint32_t maximum =
- static_cast<uint32_t>(memory_object()->maximum_pages());
- if (maximum < FLAG_wasm_max_mem_pages) return maximum;
- }
- }
- uint32_t compiled_maximum_pages = compiled_module()->module()->maximum_pages;
- Isolate* isolate = GetIsolate();
- assert(compiled_module()->module()->is_wasm());
- isolate->counters()->wasm_wasm_max_mem_pages_count()->AddSample(
- compiled_maximum_pages);
- if (compiled_maximum_pages != 0) return compiled_maximum_pages;
- return FLAG_wasm_max_mem_pages;
-}
-
WasmInstanceObject* WasmInstanceObject::GetOwningInstance(
const wasm::WasmCode* code) {
DisallowHeapAllocation no_gc;
Object* weak_link = nullptr;
- DCHECK(code->kind() == wasm::WasmCode::Function ||
- code->kind() == wasm::WasmCode::InterpreterStub);
- weak_link = code->owner()->compiled_module()->ptr_to_weak_owning_instance();
+ DCHECK(code->kind() == wasm::WasmCode::kFunction ||
+ code->kind() == wasm::WasmCode::kInterpreterStub);
+ weak_link = code->owner()->compiled_module()->weak_owning_instance();
DCHECK(weak_link->IsWeakCell());
WeakCell* cell = WeakCell::cast(weak_link);
if (cell->cleared()) return nullptr;
@@ -618,21 +638,21 @@ void WasmInstanceObject::ValidateInstancesChainForTesting(
CHECK_GE(instance_count, 0);
DisallowHeapAllocation no_gc;
WasmCompiledModule* compiled_module = module_obj->compiled_module();
- CHECK_EQ(JSObject::cast(compiled_module->ptr_to_weak_wasm_module()->value()),
+ CHECK_EQ(JSObject::cast(compiled_module->weak_wasm_module()->value()),
*module_obj);
Object* prev = nullptr;
int found_instances = compiled_module->has_weak_owning_instance() ? 1 : 0;
WasmCompiledModule* current_instance = compiled_module;
while (current_instance->has_next_instance()) {
CHECK((prev == nullptr && !current_instance->has_prev_instance()) ||
- current_instance->ptr_to_prev_instance() == prev);
- CHECK_EQ(current_instance->ptr_to_weak_wasm_module()->value(), *module_obj);
- CHECK(current_instance->ptr_to_weak_owning_instance()
+ current_instance->prev_instance() == prev);
+ CHECK_EQ(current_instance->weak_wasm_module()->value(), *module_obj);
+ CHECK(current_instance->weak_owning_instance()
->value()
->IsWasmInstanceObject());
prev = current_instance;
current_instance =
- WasmCompiledModule::cast(current_instance->ptr_to_next_instance());
+ WasmCompiledModule::cast(current_instance->next_instance());
++found_instances;
CHECK_LE(found_instances, instance_count);
}
@@ -644,7 +664,7 @@ void WasmInstanceObject::ValidateOrphanedInstanceForTesting(
DisallowHeapAllocation no_gc;
WasmCompiledModule* compiled_module = instance->compiled_module();
CHECK(compiled_module->has_weak_wasm_module());
- CHECK(compiled_module->ptr_to_weak_wasm_module()->cleared());
+ CHECK(compiled_module->weak_wasm_module()->cleared());
}
bool WasmExportedFunction::IsWasmExportedFunction(Object* object) {
@@ -735,8 +755,9 @@ WasmCodeWrapper WasmExportedFunction::GetWasmCode() {
DCHECK(!it.done());
WasmCodeWrapper target;
if (FLAG_wasm_jit_to_native) {
- target = WasmCodeWrapper(GetIsolate()->wasm_code_manager()->LookupCode(
- it.rinfo()->js_to_wasm_address()));
+ target = WasmCodeWrapper(
+ GetIsolate()->wasm_engine()->code_manager()->LookupCode(
+ it.rinfo()->js_to_wasm_address()));
} else {
Code* code = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
if (!IsWasmFunctionCode(code)) continue;
@@ -953,8 +974,6 @@ void WasmSharedModuleData::SetBreakpointsOnNewInstance(
Handle<WasmSharedModuleData> shared, Handle<WasmInstanceObject> instance) {
if (!shared->has_breakpoint_infos()) return;
Isolate* isolate = shared->GetIsolate();
- Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
- isolate);
Handle<WasmDebugInfo> debug_info =
WasmInstanceObject::GetOrCreateDebugInfo(instance);
@@ -974,9 +993,9 @@ void WasmSharedModuleData::SetBreakpointsOnNewInstance(
int position = breakpoint_info->source_position();
// Find the function for this breakpoint, and set the breakpoint.
- int func_index = compiled_module->GetContainingFunction(position);
+ int func_index = shared->GetContainingFunction(position);
DCHECK_LE(0, func_index);
- WasmFunction& func = compiled_module->module()->functions[func_index];
+ WasmFunction& func = shared->module()->functions[func_index];
int offset_in_func = position - func.code.offset();
WasmDebugInfo::SetBreakpoint(debug_info, func_index, offset_in_func);
}
@@ -991,62 +1010,286 @@ void WasmSharedModuleData::PrepareForLazyCompilation(
shared->set_lazy_compilation_orchestrator(*orch_handle);
}
+namespace {
+
+enum AsmJsOffsetTableEntryLayout {
+ kOTEByteOffset,
+ kOTECallPosition,
+ kOTENumberConvPosition,
+ kOTESize
+};
+
+Handle<ByteArray> GetDecodedAsmJsOffsetTable(
+ Handle<WasmSharedModuleData> shared, Isolate* isolate) {
+ DCHECK(shared->is_asm_js());
+ Handle<ByteArray> offset_table(shared->asm_js_offset_table(), isolate);
+
+ // The last byte in the asm_js_offset_table ByteArray tells whether it is
+ // still encoded (0) or decoded (1).
+ enum AsmJsTableType : int { Encoded = 0, Decoded = 1 };
+ int table_type = offset_table->get(offset_table->length() - 1);
+ DCHECK(table_type == Encoded || table_type == Decoded);
+ if (table_type == Decoded) return offset_table;
+
+ wasm::AsmJsOffsetsResult asm_offsets;
+ {
+ DisallowHeapAllocation no_gc;
+ const byte* bytes_start = offset_table->GetDataStartAddress();
+ const byte* bytes_end = bytes_start + offset_table->length() - 1;
+ asm_offsets = wasm::DecodeAsmJsOffsets(bytes_start, bytes_end);
+ }
+ // Wasm bytes must be valid and must contain an asm.js offset table.
+ DCHECK(asm_offsets.ok());
+ DCHECK_GE(kMaxInt, asm_offsets.val.size());
+ int num_functions = static_cast<int>(asm_offsets.val.size());
+ int num_imported_functions =
+ static_cast<int>(shared->module()->num_imported_functions);
+ DCHECK_EQ(shared->module()->functions.size(),
+ static_cast<size_t>(num_functions) + num_imported_functions);
+ int num_entries = 0;
+ for (int func = 0; func < num_functions; ++func) {
+ size_t new_size = asm_offsets.val[func].size();
+ DCHECK_LE(new_size, static_cast<size_t>(kMaxInt) - num_entries);
+ num_entries += static_cast<int>(new_size);
+ }
+ // One byte to encode that this is a decoded table.
+ DCHECK_GE(kMaxInt,
+ 1 + static_cast<uint64_t>(num_entries) * kOTESize * kIntSize);
+ int total_size = 1 + num_entries * kOTESize * kIntSize;
+ Handle<ByteArray> decoded_table =
+ isolate->factory()->NewByteArray(total_size, TENURED);
+ decoded_table->set(total_size - 1, AsmJsTableType::Decoded);
+ shared->set_asm_js_offset_table(*decoded_table);
+
+ int idx = 0;
+ std::vector<WasmFunction>& wasm_funs = shared->module()->functions;
+ for (int func = 0; func < num_functions; ++func) {
+ std::vector<wasm::AsmJsOffsetEntry>& func_asm_offsets =
+ asm_offsets.val[func];
+ if (func_asm_offsets.empty()) continue;
+ int func_offset = wasm_funs[num_imported_functions + func].code.offset();
+ for (wasm::AsmJsOffsetEntry& e : func_asm_offsets) {
+ // Byte offsets must be strictly monotonically increasing:
+ DCHECK_IMPLIES(idx > 0, func_offset + e.byte_offset >
+ decoded_table->get_int(idx - kOTESize));
+ decoded_table->set_int(idx + kOTEByteOffset, func_offset + e.byte_offset);
+ decoded_table->set_int(idx + kOTECallPosition, e.source_position_call);
+ decoded_table->set_int(idx + kOTENumberConvPosition,
+ e.source_position_number_conversion);
+ idx += kOTESize;
+ }
+ }
+ DCHECK_EQ(total_size, idx * kIntSize + 1);
+ return decoded_table;
+}
+
+} // namespace
+
+int WasmSharedModuleData::GetSourcePosition(Handle<WasmSharedModuleData> shared,
+ uint32_t func_index,
+ uint32_t byte_offset,
+ bool is_at_number_conversion) {
+ Isolate* isolate = shared->GetIsolate();
+ const WasmModule* module = shared->module();
+
+ if (!module->is_asm_js()) {
+ // For non-asm.js modules, we just add the function's start offset
+ // to make a module-relative position.
+ return byte_offset + shared->GetFunctionOffset(func_index);
+ }
+
+ // asm.js modules have an additional offset table that must be searched.
+ Handle<ByteArray> offset_table = GetDecodedAsmJsOffsetTable(shared, isolate);
+
+ DCHECK_LT(func_index, module->functions.size());
+ uint32_t func_code_offset = module->functions[func_index].code.offset();
+ uint32_t total_offset = func_code_offset + byte_offset;
+
+ // Binary search for the total byte offset.
+ int left = 0; // inclusive
+ int right = offset_table->length() / kIntSize / kOTESize; // exclusive
+ DCHECK_LT(left, right);
+ while (right - left > 1) {
+ int mid = left + (right - left) / 2;
+ int mid_entry = offset_table->get_int(kOTESize * mid);
+ DCHECK_GE(kMaxInt, mid_entry);
+ if (static_cast<uint32_t>(mid_entry) <= total_offset) {
+ left = mid;
+ } else {
+ right = mid;
+ }
+ }
+ // There should be an entry for each position that could show up on the stack
+ // trace:
+ DCHECK_EQ(total_offset, offset_table->get_int(kOTESize * left));
+ int idx = is_at_number_conversion ? kOTENumberConvPosition : kOTECallPosition;
+ return offset_table->get_int(kOTESize * left + idx);
+}
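Note: for asm.js modules, the decoded offset table built above is a flat array of int triples ordered by module-relative byte offset, and GetSourcePosition binary-searches it for the greatest entry not exceeding the requested offset. A self-contained sketch of that layout and lookup (illustrative types, not V8's ByteArray representation):

#include <cstdint>
#include <vector>

// One decoded entry, matching the kOTE* layout above.
struct AsmJsOffsetEntry {
  int byte_offset;                 // kOTEByteOffset
  int call_position;               // kOTECallPosition
  int number_conversion_position;  // kOTENumberConvPosition
};

// Minimal sketch of the lookup: entries are sorted by byte_offset and are
// assumed to contain an exact match for total_offset, as the DCHECK requires.
int LookupSourcePosition(const std::vector<AsmJsOffsetEntry>& entries,
                         uint32_t total_offset, bool is_at_number_conversion) {
  int left = 0;                                  // inclusive
  int right = static_cast<int>(entries.size());  // exclusive
  while (right - left > 1) {
    int mid = left + (right - left) / 2;
    if (static_cast<uint32_t>(entries[mid].byte_offset) <= total_offset) {
      left = mid;
    } else {
      right = mid;
    }
  }
  return is_at_number_conversion ? entries[left].number_conversion_position
                                 : entries[left].call_position;
}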
+
+v8::debug::WasmDisassembly WasmSharedModuleData::DisassembleFunction(
+ int func_index) {
+ DisallowHeapAllocation no_gc;
+
+ if (func_index < 0 ||
+ static_cast<uint32_t>(func_index) >= module()->functions.size())
+ return {};
+
+ SeqOneByteString* module_bytes_str = module_bytes();
+ Vector<const byte> module_bytes(module_bytes_str->GetChars(),
+ module_bytes_str->length());
+
+ std::ostringstream disassembly_os;
+ v8::debug::WasmDisassembly::OffsetTable offset_table;
+
+ PrintWasmText(module(), module_bytes, static_cast<uint32_t>(func_index),
+ disassembly_os, &offset_table);
+
+ return {disassembly_os.str(), std::move(offset_table)};
+}
+
+bool WasmSharedModuleData::GetPossibleBreakpoints(
+ const v8::debug::Location& start, const v8::debug::Location& end,
+ std::vector<v8::debug::BreakLocation>* locations) {
+ DisallowHeapAllocation no_gc;
+
+ std::vector<WasmFunction>& functions = module()->functions;
+ if (start.GetLineNumber() < 0 || start.GetColumnNumber() < 0 ||
+ (!end.IsEmpty() &&
+ (end.GetLineNumber() < 0 || end.GetColumnNumber() < 0)))
+ return false;
+
+ // start_func_index, start_offset, and end_func_index are inclusive.
+ // end_offset is exclusive.
+ // start_offset and end_offset are module-relative byte offsets.
+ uint32_t start_func_index = start.GetLineNumber();
+ if (start_func_index >= functions.size()) return false;
+ int start_func_len = functions[start_func_index].code.length();
+ if (start.GetColumnNumber() > start_func_len) return false;
+ uint32_t start_offset =
+ functions[start_func_index].code.offset() + start.GetColumnNumber();
+ uint32_t end_func_index;
+ uint32_t end_offset;
+ if (end.IsEmpty()) {
+ // Default: everything till the end of the Script.
+ end_func_index = static_cast<uint32_t>(functions.size() - 1);
+ end_offset = functions[end_func_index].code.end_offset();
+ } else {
+ // If end is specified: Use it and check for valid input.
+ end_func_index = static_cast<uint32_t>(end.GetLineNumber());
+
+ // Special case: the end position points at the start of the next function.
+ // Change it to stop at the end of the previous function instead, so that we
+ // don't also disassemble the next function.
+ if (end.GetColumnNumber() == 0 && end_func_index > 0) {
+ --end_func_index;
+ end_offset = functions[end_func_index].code.end_offset();
+ } else {
+ if (end_func_index >= functions.size()) return false;
+ end_offset =
+ functions[end_func_index].code.offset() + end.GetColumnNumber();
+ if (end_offset > functions[end_func_index].code.end_offset())
+ return false;
+ }
+ }
+
+ AccountingAllocator alloc;
+ Zone tmp(&alloc, ZONE_NAME);
+ const byte* module_start = module_bytes()->GetChars();
+
+ for (uint32_t func_idx = start_func_index; func_idx <= end_func_index;
+ ++func_idx) {
+ WasmFunction& func = functions[func_idx];
+ if (func.code.length() == 0) continue;
+
+ wasm::BodyLocalDecls locals(&tmp);
+ wasm::BytecodeIterator iterator(module_start + func.code.offset(),
+ module_start + func.code.end_offset(),
+ &locals);
+ DCHECK_LT(0u, locals.encoded_size);
+ for (uint32_t offset : iterator.offsets()) {
+ uint32_t total_offset = func.code.offset() + offset;
+ if (total_offset >= end_offset) {
+ DCHECK_EQ(end_func_index, func_idx);
+ break;
+ }
+ if (total_offset < start_offset) continue;
+ locations->emplace_back(func_idx, offset, debug::kCommonBreakLocation);
+ }
+ }
+ return true;
+}
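Note: in these debug locations the line number carries the function index and the column carries the byte offset within that function's body, which is why the loop above compares module-relative offsets against start_offset and end_offset. A small sketch of that convention (hypothetical helper, not a V8 API):

#include <cstdint>
#include <utility>

// Split a module-relative byte offset into the (line, column) pair used by
// wasm debug locations: line = function index, column = offset in the body.
// module_relative_offset is assumed to lie inside the function's code range.
std::pair<int, int> ToWasmLocation(uint32_t func_index,
                                   uint32_t func_code_offset,
                                   uint32_t module_relative_offset) {
  return {static_cast<int>(func_index),
          static_cast<int>(module_relative_offset - func_code_offset)};
}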
+
+MaybeHandle<FixedArray> WasmSharedModuleData::CheckBreakPoints(
+ Isolate* isolate, Handle<WasmSharedModuleData> shared, int position) {
+ if (!shared->has_breakpoint_infos()) return {};
+
+ Handle<FixedArray> breakpoint_infos(shared->breakpoint_infos(), isolate);
+ int insert_pos =
+ FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position);
+ if (insert_pos >= breakpoint_infos->length()) return {};
+
+ Handle<Object> maybe_breakpoint_info(breakpoint_infos->get(insert_pos),
+ isolate);
+ if (maybe_breakpoint_info->IsUndefined(isolate)) return {};
+ Handle<BreakPointInfo> breakpoint_info =
+ Handle<BreakPointInfo>::cast(maybe_breakpoint_info);
+ if (breakpoint_info->source_position() != position) return {};
+
+ Handle<Object> breakpoint_objects(breakpoint_info->break_point_objects(),
+ isolate);
+ return isolate->debug()->GetHitBreakPointObjects(breakpoint_objects);
+}
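Note: CheckBreakPoints delegates the position lookup to FindBreakpointInfoInsertPos, which locates where the given position sits in the sorted breakpoint_infos array; only an exact position match reports a hit. A sketch of a lower-bound search with those semantics (assumed behaviour, not copied from V8):

#include <vector>

// Illustrative lower-bound search: positions are sorted ascending, and we
// return the first index whose position is >= the requested one (which may
// be positions.size() if the position is past the last entry).
int FindInsertPos(const std::vector<int>& positions, int position) {
  int left = 0;
  int right = static_cast<int>(positions.size());
  while (left < right) {
    int mid = left + (right - left) / 2;
    if (positions[mid] < position) {
      left = mid + 1;
    } else {
      right = mid;
    }
  }
  return left;
}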
+
Handle<WasmCompiledModule> WasmCompiledModule::New(
Isolate* isolate, WasmModule* module, Handle<FixedArray> code_table,
Handle<FixedArray> export_wrappers,
const std::vector<GlobalHandleAddress>& function_tables,
- const std::vector<GlobalHandleAddress>& signature_tables) {
- DCHECK_EQ(function_tables.size(), signature_tables.size());
+ bool use_trap_handler) {
Handle<FixedArray> ret =
isolate->factory()->NewFixedArray(PropertyIndices::Count, TENURED);
// WasmCompiledModule::cast would fail since fields are not set yet.
Handle<WasmCompiledModule> compiled_module(
reinterpret_cast<WasmCompiledModule*>(*ret), isolate);
- compiled_module->set_native_context(isolate->native_context());
+ Handle<WeakCell> weak_native_context =
+ isolate->factory()->NewWeakCell(isolate->native_context());
+ compiled_module->set_weak_native_context(*weak_native_context);
+ compiled_module->set_use_trap_handler(use_trap_handler);
if (!FLAG_wasm_jit_to_native) {
compiled_module->InitId();
- compiled_module->set_native_context(isolate->native_context());
- compiled_module->set_code_table(code_table);
- compiled_module->set_export_wrappers(export_wrappers);
+ compiled_module->set_code_table(*code_table);
+ compiled_module->set_export_wrappers(*export_wrappers);
// TODO(mtrofin): we copy these because the order of finalization isn't
// reliable, and we need these at Reset (which is called at
// finalization). If the order were reliable, and top-down, we could instead
// just get them from shared().
- compiled_module->set_initial_pages(module->initial_pages);
compiled_module->set_num_imported_functions(module->num_imported_functions);
int num_function_tables = static_cast<int>(function_tables.size());
if (num_function_tables > 0) {
- Handle<FixedArray> st =
- isolate->factory()->NewFixedArray(num_function_tables, TENURED);
Handle<FixedArray> ft =
isolate->factory()->NewFixedArray(num_function_tables, TENURED);
for (int i = 0; i < num_function_tables; ++i) {
- size_t index = static_cast<size_t>(i);
- SetTableValue(isolate, ft, i, function_tables[index]);
- SetTableValue(isolate, st, i, signature_tables[index]);
+ SetTableValue(isolate, ft, i, function_tables[i]);
}
// TODO(wasm): setting the empty tables here this way is OK under the
// assumption that we compile and then instantiate. It needs rework if we
// do direct instantiation. The empty tables are used as a default when
// resetting the compiled module.
- compiled_module->set_signature_tables(st);
- compiled_module->set_empty_signature_tables(st);
- compiled_module->set_function_tables(ft);
- compiled_module->set_empty_function_tables(ft);
+ compiled_module->set_function_tables(*ft);
+ compiled_module->set_empty_function_tables(*ft);
}
} else {
if (!export_wrappers.is_null()) {
- compiled_module->set_export_wrappers(export_wrappers);
+ compiled_module->set_export_wrappers(*export_wrappers);
}
wasm::NativeModule* native_module = nullptr;
{
std::unique_ptr<wasm::NativeModule> native_module_ptr =
- isolate->wasm_code_manager()->NewNativeModule(*module);
+ isolate->wasm_engine()->code_manager()->NewNativeModule(*module);
native_module = native_module_ptr.release();
Handle<Foreign> native_module_wrapper =
Managed<wasm::NativeModule>::From(isolate, native_module);
- compiled_module->set_native_module(native_module_wrapper);
+ compiled_module->set_native_module(*native_module_wrapper);
Handle<WasmCompiledModule> weak_link =
isolate->global_handles()->Create(*compiled_module);
GlobalHandles::MakeWeak(Handle<Object>::cast(weak_link).location(),
@@ -1058,18 +1301,18 @@ Handle<WasmCompiledModule> WasmCompiledModule::New(
// This is here just because it's easier for APIs that need to work with
// either code_table or native_module. Otherwise we need to check if
// has_code_table and pass undefined.
- compiled_module->set_code_table(code_table);
+ compiled_module->set_code_table(*code_table);
native_module->function_tables() = function_tables;
- native_module->signature_tables() = signature_tables;
native_module->empty_function_tables() = function_tables;
- native_module->empty_signature_tables() = signature_tables;
int function_count = static_cast<int>(module->functions.size());
- compiled_module->set_handler_table(
- isolate->factory()->NewFixedArray(function_count, TENURED));
- compiled_module->set_source_positions(
- isolate->factory()->NewFixedArray(function_count, TENURED));
+ Handle<FixedArray> handler_table =
+ isolate->factory()->NewFixedArray(function_count, TENURED);
+ compiled_module->set_handler_table(*handler_table);
+ Handle<FixedArray> source_positions =
+ isolate->factory()->NewFixedArray(function_count, TENURED);
+ compiled_module->set_source_positions(*source_positions);
}
// TODO(mtrofin): copy the rest of the specialization parameters over.
// We're currently OK because we're only using defaults.
@@ -1080,7 +1323,8 @@ Handle<WasmCompiledModule> WasmCompiledModule::Clone(
Isolate* isolate, Handle<WasmCompiledModule> module) {
Handle<FixedArray> code_copy;
if (!FLAG_wasm_jit_to_native) {
- code_copy = isolate->factory()->CopyFixedArray(module->code_table());
+ code_copy = isolate->factory()->CopyFixedArray(
+ handle(module->code_table(), isolate));
}
Handle<WasmCompiledModule> ret = Handle<WasmCompiledModule>::cast(
isolate->factory()->CopyFixedArray(module));
@@ -1090,7 +1334,7 @@ Handle<WasmCompiledModule> WasmCompiledModule::Clone(
ret->reset_weak_exported_functions();
if (!FLAG_wasm_jit_to_native) {
ret->InitId();
- ret->set_code_table(code_copy);
+ ret->set_code_table(*code_copy);
return ret;
}
@@ -1100,7 +1344,7 @@ Handle<WasmCompiledModule> WasmCompiledModule::Clone(
// which would shift the this pointer in set_native_module.
Handle<Foreign> native_module_wrapper =
Managed<wasm::NativeModule>::From(isolate, native_module.release());
- ret->set_native_module(native_module_wrapper);
+ ret->set_native_module(*native_module_wrapper);
Handle<WasmCompiledModule> weak_link =
isolate->global_handles()->Create(*ret);
GlobalHandles::MakeWeak(Handle<Object>::cast(weak_link).location(),
@@ -1112,7 +1356,7 @@ Handle<WasmCompiledModule> WasmCompiledModule::Clone(
if (module->has_lazy_compile_data()) {
Handle<FixedArray> lazy_comp_data = isolate->factory()->NewFixedArray(
module->lazy_compile_data()->length(), TENURED);
- ret->set_lazy_compile_data(lazy_comp_data);
+ ret->set_lazy_compile_data(*lazy_comp_data);
}
return ret;
}
@@ -1140,7 +1384,7 @@ Address WasmCompiledModule::GetTableValue(FixedArray* table, int index) {
wasm::NativeModule* WasmCompiledModule::GetNativeModule() const {
if (!has_native_module()) return nullptr;
- return Managed<wasm::NativeModule>::cast(ptr_to_native_module())->get();
+ return Managed<wasm::NativeModule>::cast(native_module())->get();
}
void WasmCompiledModule::ResetGCModel(Isolate* isolate,
@@ -1148,7 +1392,7 @@ void WasmCompiledModule::ResetGCModel(Isolate* isolate,
DisallowHeapAllocation no_gc;
TRACE("Resetting %d\n", compiled_module->instance_id());
Object* undefined = *isolate->factory()->undefined_value();
- Object* fct_obj = compiled_module->ptr_to_code_table();
+ Object* fct_obj = compiled_module->code_table();
if (fct_obj != nullptr && fct_obj != undefined) {
// Patch code to update memory references, global references, and function
// table references.
@@ -1157,28 +1401,19 @@ void WasmCompiledModule::ResetGCModel(Isolate* isolate,
// Reset function tables.
if (compiled_module->has_function_tables()) {
- FixedArray* function_tables = compiled_module->ptr_to_function_tables();
- FixedArray* signature_tables = compiled_module->ptr_to_signature_tables();
+ FixedArray* function_tables = compiled_module->function_tables();
FixedArray* empty_function_tables =
- compiled_module->ptr_to_empty_function_tables();
- FixedArray* empty_signature_tables =
- compiled_module->ptr_to_empty_signature_tables();
+ compiled_module->empty_function_tables();
if (function_tables != empty_function_tables) {
DCHECK_EQ(function_tables->length(), empty_function_tables->length());
for (int i = 0, e = function_tables->length(); i < e; ++i) {
GlobalHandleAddress func_addr =
WasmCompiledModule::GetTableValue(function_tables, i);
- GlobalHandleAddress sig_addr =
- WasmCompiledModule::GetTableValue(signature_tables, i);
code_specialization.RelocatePointer(
func_addr,
WasmCompiledModule::GetTableValue(empty_function_tables, i));
- code_specialization.RelocatePointer(
- sig_addr,
- WasmCompiledModule::GetTableValue(empty_signature_tables, i));
}
- compiled_module->set_ptr_to_function_tables(empty_function_tables);
- compiled_module->set_ptr_to_signature_tables(empty_signature_tables);
+ compiled_module->set_function_tables(empty_function_tables);
}
}
@@ -1226,8 +1461,10 @@ void WasmCompiledModule::Reset(Isolate* isolate,
compiled_module->reset_next_instance();
wasm::NativeModule* native_module = compiled_module->GetNativeModule();
if (native_module == nullptr) return;
+ native_module->SetExecutable(false);
+
TRACE("Resetting %zu\n", native_module->instance_id);
- if (trap_handler::UseTrapHandler()) {
+ if (compiled_module->use_trap_handler()) {
for (uint32_t i = native_module->num_imported_functions(),
e = native_module->FunctionCount();
i < e; ++i) {
@@ -1258,23 +1495,16 @@ void WasmCompiledModule::Reset(Isolate* isolate,
if (native_module->function_tables().size() > 0) {
std::vector<GlobalHandleAddress>& function_tables =
native_module->function_tables();
- std::vector<GlobalHandleAddress>& signature_tables =
- native_module->signature_tables();
std::vector<GlobalHandleAddress>& empty_function_tables =
native_module->empty_function_tables();
- std::vector<GlobalHandleAddress>& empty_signature_tables =
- native_module->empty_signature_tables();
if (function_tables != empty_function_tables) {
DCHECK_EQ(function_tables.size(), empty_function_tables.size());
for (size_t i = 0, e = function_tables.size(); i < e; ++i) {
code_specialization.RelocatePointer(function_tables[i],
empty_function_tables[i]);
- code_specialization.RelocatePointer(signature_tables[i],
- empty_signature_tables[i]);
}
native_module->function_tables() = empty_function_tables;
- native_module->signature_tables() = empty_signature_tables;
}
}
@@ -1283,7 +1513,7 @@ void WasmCompiledModule::Reset(Isolate* isolate,
i < end; ++i) {
wasm::WasmCode* code = native_module->GetCode(i);
// Skip lazy compile stubs.
- if (code == nullptr || code->kind() != wasm::WasmCode::Function) continue;
+ if (code == nullptr || code->kind() != wasm::WasmCode::kFunction) continue;
bool changed = code_specialization.ApplyToWasmCode(WasmCodeWrapper(code),
SKIP_ICACHE_FLUSH);
// TODO(wasm): Check if this is faster than passing FLUSH_ICACHE_IF_NEEDED
@@ -1295,17 +1525,15 @@ void WasmCompiledModule::Reset(Isolate* isolate,
}
}
-MaybeHandle<String> WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+MaybeHandle<String> WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
+ Isolate* isolate, Handle<WasmSharedModuleData> shared,
wasm::WireBytesRef ref) {
// TODO(wasm): cache strings from modules if it's a performance win.
- Handle<SeqOneByteString> module_bytes(compiled_module->module_bytes(),
- isolate);
- return WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, module_bytes, ref);
+ Handle<SeqOneByteString> module_bytes(shared->module_bytes(), isolate);
+ return ExtractUtf8StringFromModuleBytes(isolate, module_bytes, ref);
}
-MaybeHandle<String> WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+MaybeHandle<String> WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
Isolate* isolate, Handle<SeqOneByteString> module_bytes,
wasm::WireBytesRef ref) {
DCHECK_GE(module_bytes->length(), ref.end_offset());
@@ -1370,7 +1598,7 @@ void WasmCompiledModule::PrintInstancesChain() {
PrintF("->%d", current->instance_id());
}
if (!current->has_next_instance()) break;
- current = current->ptr_to_next_instance();
+ current = current->next_instance();
}
PrintF("\n");
#endif
@@ -1379,8 +1607,8 @@ void WasmCompiledModule::PrintInstancesChain() {
void WasmCompiledModule::InsertInChain(WasmModuleObject* module) {
DisallowHeapAllocation no_gc;
WasmCompiledModule* original = module->compiled_module();
- set_ptr_to_next_instance(original);
- original->set_ptr_to_prev_instance(this);
+ set_next_instance(original);
+ original->set_prev_instance(this);
set_weak_wasm_module(original->weak_wasm_module());
}
@@ -1401,7 +1629,7 @@ void WasmCompiledModule::RemoveFromChain() {
void WasmCompiledModule::OnWasmModuleDecodingComplete(
Handle<WasmSharedModuleData> shared) {
- set_shared(shared);
+ set_shared(*shared);
}
void WasmCompiledModule::ReinitializeAfterDeserialization(
@@ -1417,7 +1645,7 @@ void WasmCompiledModule::ReinitializeAfterDeserialization(
WasmSharedModuleData::ReinitializeAfterDeserialization(isolate, shared);
}
size_t function_table_count =
- compiled_module->module()->function_tables.size();
+ compiled_module->shared()->module()->function_tables.size();
wasm::NativeModule* native_module = compiled_module->GetNativeModule();
if (function_table_count > 0) {
@@ -1425,34 +1653,23 @@ void WasmCompiledModule::ReinitializeAfterDeserialization(
// addresses. Produce new global handles for the empty tables, then reset,
// which will relocate the code. We end up with a WasmCompiledModule as-if
// it were just compiled.
+ Handle<FixedArray> function_tables;
if (!FLAG_wasm_jit_to_native) {
DCHECK(compiled_module->has_function_tables());
- DCHECK(compiled_module->has_signature_tables());
- DCHECK(compiled_module->has_empty_signature_tables());
- DCHECK(compiled_module->has_empty_function_tables());
+ function_tables =
+ handle(compiled_module->empty_function_tables(), isolate);
} else {
DCHECK_GT(native_module->function_tables().size(), 0);
- DCHECK_GT(native_module->signature_tables().size(), 0);
- DCHECK_EQ(native_module->empty_signature_tables().size(),
- native_module->function_tables().size());
- DCHECK_EQ(native_module->empty_function_tables().size(),
- native_module->function_tables().size());
}
for (size_t i = 0; i < function_table_count; ++i) {
Handle<Object> global_func_table_handle =
isolate->global_handles()->Create(isolate->heap()->undefined_value());
- Handle<Object> global_sig_table_handle =
- isolate->global_handles()->Create(isolate->heap()->undefined_value());
GlobalHandleAddress new_func_table = global_func_table_handle.address();
- GlobalHandleAddress new_sig_table = global_sig_table_handle.address();
if (!FLAG_wasm_jit_to_native) {
- SetTableValue(isolate, compiled_module->empty_function_tables(),
- static_cast<int>(i), new_func_table);
- SetTableValue(isolate, compiled_module->empty_signature_tables(),
- static_cast<int>(i), new_sig_table);
+ SetTableValue(isolate, function_tables, static_cast<int>(i),
+ new_func_table);
} else {
native_module->empty_function_tables()[i] = new_func_table;
- native_module->empty_signature_tables()[i] = new_sig_table;
}
}
}
@@ -1463,38 +1680,31 @@ void WasmCompiledModule::ReinitializeAfterDeserialization(
DCHECK(WasmSharedModuleData::IsWasmSharedModuleData(*shared));
}
-uint32_t WasmCompiledModule::default_mem_size() const {
- return initial_pages() * WasmModule::kPageSize;
-}
-
-MaybeHandle<String> WasmCompiledModule::GetModuleNameOrNull(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
- WasmModule* module = compiled_module->module();
+MaybeHandle<String> WasmSharedModuleData::GetModuleNameOrNull(
+ Isolate* isolate, Handle<WasmSharedModuleData> shared) {
+ WasmModule* module = shared->module();
if (!module->name.is_set()) return {};
- return WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, compiled_module, module->name);
+ return ExtractUtf8StringFromModuleBytes(isolate, shared, module->name);
}
-MaybeHandle<String> WasmCompiledModule::GetFunctionNameOrNull(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+MaybeHandle<String> WasmSharedModuleData::GetFunctionNameOrNull(
+ Isolate* isolate, Handle<WasmSharedModuleData> shared,
uint32_t func_index) {
- DCHECK_LT(func_index, compiled_module->module()->functions.size());
- WasmFunction& function = compiled_module->module()->functions[func_index];
+ DCHECK_LT(func_index, shared->module()->functions.size());
+ WasmFunction& function = shared->module()->functions[func_index];
if (!function.name.is_set()) return {};
- return WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, compiled_module, function.name);
+ return ExtractUtf8StringFromModuleBytes(isolate, shared, function.name);
}
-Handle<String> WasmCompiledModule::GetFunctionName(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+Handle<String> WasmSharedModuleData::GetFunctionName(
+ Isolate* isolate, Handle<WasmSharedModuleData> shared,
uint32_t func_index) {
- MaybeHandle<String> name =
- GetFunctionNameOrNull(isolate, compiled_module, func_index);
+ MaybeHandle<String> name = GetFunctionNameOrNull(isolate, shared, func_index);
if (!name.is_null()) return name.ToHandleChecked();
return isolate->factory()->NewStringFromStaticChars("<WASM UNNAMED>");
}
-Vector<const uint8_t> WasmCompiledModule::GetRawFunctionName(
+Vector<const uint8_t> WasmSharedModuleData::GetRawFunctionName(
uint32_t func_index) {
DCHECK_GT(module()->functions.size(), func_index);
WasmFunction& function = module()->functions[func_index];
@@ -1505,14 +1715,14 @@ Vector<const uint8_t> WasmCompiledModule::GetRawFunctionName(
function.name.length());
}
-int WasmCompiledModule::GetFunctionOffset(uint32_t func_index) {
+int WasmSharedModuleData::GetFunctionOffset(uint32_t func_index) {
std::vector<WasmFunction>& functions = module()->functions;
if (static_cast<uint32_t>(func_index) >= functions.size()) return -1;
DCHECK_GE(kMaxInt, functions[func_index].code.offset());
return static_cast<int>(functions[func_index].code.offset());
}
-int WasmCompiledModule::GetContainingFunction(uint32_t byte_offset) {
+int WasmSharedModuleData::GetContainingFunction(uint32_t byte_offset) {
std::vector<WasmFunction>& functions = module()->functions;
// Binary search for a function containing the given position.
@@ -1537,8 +1747,8 @@ int WasmCompiledModule::GetContainingFunction(uint32_t byte_offset) {
return left;
}
-bool WasmCompiledModule::GetPositionInfo(uint32_t position,
- Script::PositionInfo* info) {
+bool WasmSharedModuleData::GetPositionInfo(uint32_t position,
+ Script::PositionInfo* info) {
int func_index = GetContainingFunction(position);
if (func_index < 0) return false;
@@ -1551,234 +1761,25 @@ bool WasmCompiledModule::GetPositionInfo(uint32_t position,
return true;
}
-namespace {
-
-enum AsmJsOffsetTableEntryLayout {
- kOTEByteOffset,
- kOTECallPosition,
- kOTENumberConvPosition,
- kOTESize
-};
-
-Handle<ByteArray> GetDecodedAsmJsOffsetTable(
- Handle<WasmCompiledModule> compiled_module, Isolate* isolate) {
- DCHECK(compiled_module->is_asm_js());
- Handle<ByteArray> offset_table(
- compiled_module->shared()->asm_js_offset_table(), isolate);
-
- // The last byte in the asm_js_offset_tables ByteArray tells whether it is
- // still encoded (0) or decoded (1).
- enum AsmJsTableType : int { Encoded = 0, Decoded = 1 };
- int table_type = offset_table->get(offset_table->length() - 1);
- DCHECK(table_type == Encoded || table_type == Decoded);
- if (table_type == Decoded) return offset_table;
-
- wasm::AsmJsOffsetsResult asm_offsets;
- {
- DisallowHeapAllocation no_gc;
- const byte* bytes_start = offset_table->GetDataStartAddress();
- const byte* bytes_end = bytes_start + offset_table->length() - 1;
- asm_offsets = wasm::DecodeAsmJsOffsets(bytes_start, bytes_end);
- }
- // Wasm bytes must be valid and must contain asm.js offset table.
- DCHECK(asm_offsets.ok());
- DCHECK_GE(kMaxInt, asm_offsets.val.size());
- int num_functions = static_cast<int>(asm_offsets.val.size());
- int num_imported_functions =
- static_cast<int>(compiled_module->module()->num_imported_functions);
- DCHECK_EQ(compiled_module->module()->functions.size(),
- static_cast<size_t>(num_functions) + num_imported_functions);
- int num_entries = 0;
- for (int func = 0; func < num_functions; ++func) {
- size_t new_size = asm_offsets.val[func].size();
- DCHECK_LE(new_size, static_cast<size_t>(kMaxInt) - num_entries);
- num_entries += static_cast<int>(new_size);
- }
- // One byte to encode that this is a decoded table.
- DCHECK_GE(kMaxInt,
- 1 + static_cast<uint64_t>(num_entries) * kOTESize * kIntSize);
- int total_size = 1 + num_entries * kOTESize * kIntSize;
- Handle<ByteArray> decoded_table =
- isolate->factory()->NewByteArray(total_size, TENURED);
- decoded_table->set(total_size - 1, AsmJsTableType::Decoded);
- compiled_module->shared()->set_asm_js_offset_table(*decoded_table);
-
- int idx = 0;
- std::vector<WasmFunction>& wasm_funs = compiled_module->module()->functions;
- for (int func = 0; func < num_functions; ++func) {
- std::vector<wasm::AsmJsOffsetEntry>& func_asm_offsets =
- asm_offsets.val[func];
- if (func_asm_offsets.empty()) continue;
- int func_offset = wasm_funs[num_imported_functions + func].code.offset();
- for (wasm::AsmJsOffsetEntry& e : func_asm_offsets) {
- // Byte offsets must be strictly monotonously increasing:
- DCHECK_IMPLIES(idx > 0, func_offset + e.byte_offset >
- decoded_table->get_int(idx - kOTESize));
- decoded_table->set_int(idx + kOTEByteOffset, func_offset + e.byte_offset);
- decoded_table->set_int(idx + kOTECallPosition, e.source_position_call);
- decoded_table->set_int(idx + kOTENumberConvPosition,
- e.source_position_number_conversion);
- idx += kOTESize;
- }
- }
- DCHECK_EQ(total_size, idx * kIntSize + 1);
- return decoded_table;
-}
-
-} // namespace
-
-int WasmCompiledModule::GetSourcePosition(
- Handle<WasmCompiledModule> compiled_module, uint32_t func_index,
- uint32_t byte_offset, bool is_at_number_conversion) {
- Isolate* isolate = compiled_module->GetIsolate();
- const WasmModule* module = compiled_module->module();
-
- if (!module->is_asm_js()) {
- // for non-asm.js modules, we just add the function's start offset
- // to make a module-relative position.
- return byte_offset + compiled_module->GetFunctionOffset(func_index);
- }
-
- // asm.js modules have an additional offset table that must be searched.
- Handle<ByteArray> offset_table =
- GetDecodedAsmJsOffsetTable(compiled_module, isolate);
-
- DCHECK_LT(func_index, module->functions.size());
- uint32_t func_code_offset = module->functions[func_index].code.offset();
- uint32_t total_offset = func_code_offset + byte_offset;
-
- // Binary search for the total byte offset.
- int left = 0; // inclusive
- int right = offset_table->length() / kIntSize / kOTESize; // exclusive
- DCHECK_LT(left, right);
- while (right - left > 1) {
- int mid = left + (right - left) / 2;
- int mid_entry = offset_table->get_int(kOTESize * mid);
- DCHECK_GE(kMaxInt, mid_entry);
- if (static_cast<uint32_t>(mid_entry) <= total_offset) {
- left = mid;
- } else {
- right = mid;
- }
- }
- // There should be an entry for each position that could show up on the stack
- // trace:
- DCHECK_EQ(total_offset, offset_table->get_int(kOTESize * left));
- int idx = is_at_number_conversion ? kOTENumberConvPosition : kOTECallPosition;
- return offset_table->get_int(kOTESize * left + idx);
-}
-
-v8::debug::WasmDisassembly WasmCompiledModule::DisassembleFunction(
- int func_index) {
- DisallowHeapAllocation no_gc;
-
- if (func_index < 0 ||
- static_cast<uint32_t>(func_index) >= module()->functions.size())
- return {};
-
- SeqOneByteString* module_bytes_str = module_bytes();
- Vector<const byte> module_bytes(module_bytes_str->GetChars(),
- module_bytes_str->length());
-
- std::ostringstream disassembly_os;
- v8::debug::WasmDisassembly::OffsetTable offset_table;
-
- PrintWasmText(module(), module_bytes, static_cast<uint32_t>(func_index),
- disassembly_os, &offset_table);
-
- return {disassembly_os.str(), std::move(offset_table)};
-}
-
-bool WasmCompiledModule::GetPossibleBreakpoints(
- const v8::debug::Location& start, const v8::debug::Location& end,
- std::vector<v8::debug::BreakLocation>* locations) {
- DisallowHeapAllocation no_gc;
-
- std::vector<WasmFunction>& functions = module()->functions;
- if (start.GetLineNumber() < 0 || start.GetColumnNumber() < 0 ||
- (!end.IsEmpty() &&
- (end.GetLineNumber() < 0 || end.GetColumnNumber() < 0)))
- return false;
-
- // start_func_index, start_offset and end_func_index is inclusive.
- // end_offset is exclusive.
- // start_offset and end_offset are module-relative byte offsets.
- uint32_t start_func_index = start.GetLineNumber();
- if (start_func_index >= functions.size()) return false;
- int start_func_len = functions[start_func_index].code.length();
- if (start.GetColumnNumber() > start_func_len) return false;
- uint32_t start_offset =
- functions[start_func_index].code.offset() + start.GetColumnNumber();
- uint32_t end_func_index;
- uint32_t end_offset;
- if (end.IsEmpty()) {
- // Default: everything till the end of the Script.
- end_func_index = static_cast<uint32_t>(functions.size() - 1);
- end_offset = functions[end_func_index].code.end_offset();
- } else {
- // If end is specified: Use it and check for valid input.
- end_func_index = static_cast<uint32_t>(end.GetLineNumber());
-
- // Special case: Stop before the start of the next function. Change to: Stop
- // at the end of the function before, such that we don't disassemble the
- // next function also.
- if (end.GetColumnNumber() == 0 && end_func_index > 0) {
- --end_func_index;
- end_offset = functions[end_func_index].code.end_offset();
- } else {
- if (end_func_index >= functions.size()) return false;
- end_offset =
- functions[end_func_index].code.offset() + end.GetColumnNumber();
- if (end_offset > functions[end_func_index].code.end_offset())
- return false;
- }
- }
-
- AccountingAllocator alloc;
- Zone tmp(&alloc, ZONE_NAME);
- const byte* module_start = module_bytes()->GetChars();
-
- for (uint32_t func_idx = start_func_index; func_idx <= end_func_index;
- ++func_idx) {
- WasmFunction& func = functions[func_idx];
- if (func.code.length() == 0) continue;
-
- wasm::BodyLocalDecls locals(&tmp);
- wasm::BytecodeIterator iterator(module_start + func.code.offset(),
- module_start + func.code.end_offset(),
- &locals);
- DCHECK_LT(0u, locals.encoded_size);
- for (uint32_t offset : iterator.offsets()) {
- uint32_t total_offset = func.code.offset() + offset;
- if (total_offset >= end_offset) {
- DCHECK_EQ(end_func_index, func_idx);
- break;
- }
- if (total_offset < start_offset) continue;
- locations->emplace_back(func_idx, offset, debug::kCommonBreakLocation);
- }
- }
- return true;
-}
bool WasmCompiledModule::SetBreakPoint(
Handle<WasmCompiledModule> compiled_module, int* position,
Handle<Object> break_point_object) {
Isolate* isolate = compiled_module->GetIsolate();
+ Handle<WasmSharedModuleData> shared(compiled_module->shared(), isolate);
// Find the function for this breakpoint.
- int func_index = compiled_module->GetContainingFunction(*position);
+ int func_index = shared->GetContainingFunction(*position);
if (func_index < 0) return false;
- WasmFunction& func = compiled_module->module()->functions[func_index];
+ WasmFunction& func = shared->module()->functions[func_index];
int offset_in_func = *position - func.code.offset();
// According to the current design, we should only be called with valid
// breakable positions.
- DCHECK(IsBreakablePosition(compiled_module, func_index, offset_in_func));
+ DCHECK(IsBreakablePosition(*shared, func_index, offset_in_func));
// Insert new break point into break_positions of shared module data.
- WasmSharedModuleData::AddBreakpoint(compiled_module->shared(), *position,
- break_point_object);
+ WasmSharedModuleData::AddBreakpoint(shared, *position, break_point_object);
// Iterate over all instances of this module and tell them to set this new
// breakpoint.
@@ -1792,27 +1793,6 @@ bool WasmCompiledModule::SetBreakPoint(
return true;
}
-MaybeHandle<FixedArray> WasmCompiledModule::CheckBreakPoints(int position) {
- Isolate* isolate = GetIsolate();
- if (!shared()->has_breakpoint_infos()) return {};
-
- Handle<FixedArray> breakpoint_infos(shared()->breakpoint_infos(), isolate);
- int insert_pos =
- FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position);
- if (insert_pos >= breakpoint_infos->length()) return {};
-
- Handle<Object> maybe_breakpoint_info(breakpoint_infos->get(insert_pos),
- isolate);
- if (maybe_breakpoint_info->IsUndefined(isolate)) return {};
- Handle<BreakPointInfo> breakpoint_info =
- Handle<BreakPointInfo>::cast(maybe_breakpoint_info);
- if (breakpoint_info->source_position() != position) return {};
-
- Handle<Object> breakpoint_objects(breakpoint_info->break_point_objects(),
- isolate);
- return isolate->debug()->GetHitBreakPointObjects(breakpoint_objects);
-}
-
void AttachWasmFunctionInfo(Isolate* isolate, Handle<Code> code,
MaybeHandle<WeakCell> weak_instance,
int func_index) {
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index ec53b8ac2a..cecc11f83f 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -12,6 +12,7 @@
#include "src/objects.h"
#include "src/objects/script.h"
#include "src/wasm/decoder.h"
+#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
@@ -26,7 +27,6 @@ namespace wasm {
class InterpretedFrame;
class NativeModule;
class WasmCode;
-class WasmInterpreter;
struct WasmModule;
class SignatureMap;
typedef Address GlobalHandleAddress;
@@ -69,9 +69,7 @@ struct WasmContext {
byte* globals_start = nullptr;
inline void SetRawMemory(void* mem_start, size_t mem_size) {
- DCHECK_LE(mem_size, std::min(wasm::kV8MaxWasmMemoryPages,
- wasm::kSpecMaxWasmMemoryPages) *
- wasm::WasmModule::kPageSize);
+ DCHECK_LE(mem_size, wasm::kV8MaxWasmMemoryPages * wasm::kWasmPageSize);
this->mem_start = static_cast<byte*>(mem_start);
this->mem_size = static_cast<uint32_t>(mem_size);
this->mem_mask = base::bits::RoundUpToPowerOfTwo32(this->mem_size) - 1;
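Note: SetRawMemory stores a mask of the memory size rounded up to the next power of two, minus one, which generated code can use for cheap bounds masking. A sketch of that computation (the first helper mirrors what base::bits::RoundUpToPowerOfTwo32 is assumed to do):

#include <cstdint>

// Round a 32-bit value up to the next power of two (illustrative stand-in for
// base::bits::RoundUpToPowerOfTwo32).
uint32_t RoundUpToPowerOfTwo32(uint32_t value) {
  if (value <= 1) return 1;
  uint32_t result = 1;
  while (result < value) result <<= 1;
  return result;
}

// The mask used above: for mem_size = 65536 (one wasm page) this yields 0xFFFF.
uint32_t ComputeMemMask(uint32_t mem_size) {
  return RoundUpToPowerOfTwo32(mem_size) - 1;
}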
@@ -131,13 +129,17 @@ class WasmTableObject : public JSObject {
static Handle<WasmTableObject> New(Isolate* isolate, uint32_t initial,
int64_t maximum,
Handle<FixedArray>* js_functions);
- static Handle<FixedArray> AddDispatchTable(
- Isolate* isolate, Handle<WasmTableObject> table,
- Handle<WasmInstanceObject> instance, int table_index,
- Handle<FixedArray> function_table, Handle<FixedArray> signature_table);
+ static void AddDispatchTable(Isolate* isolate, Handle<WasmTableObject> table,
+ Handle<WasmInstanceObject> instance,
+ int table_index,
+ Handle<FixedArray> function_table);
static void Set(Isolate* isolate, Handle<WasmTableObject> table,
int32_t index, Handle<JSFunction> function);
+
+ static void UpdateDispatchTables(Handle<WasmTableObject> table, int index,
+ wasm::FunctionSig* sig,
+ Handle<Object> code_or_foreign);
};
// Representation of a WebAssembly.Memory JavaScript-level object.
@@ -177,8 +179,6 @@ class WasmMemoryObject : public JSObject {
Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, int32_t maximum);
static int32_t Grow(Isolate*, Handle<WasmMemoryObject>, uint32_t pages);
- static void SetupNewBufferWithSameBackingStore(
- Isolate* isolate, Handle<WasmMemoryObject> memory_object, uint32_t size);
};
// A WebAssembly.Instance JavaScript-level object.
@@ -194,7 +194,6 @@ class WasmInstanceObject : public JSObject {
DECL_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo)
DECL_OPTIONAL_ACCESSORS(table_object, WasmTableObject)
DECL_OPTIONAL_ACCESSORS(function_tables, FixedArray)
- DECL_OPTIONAL_ACCESSORS(signature_tables, FixedArray)
// FixedArray of all instances whose code was imported
DECL_OPTIONAL_ACCESSORS(directly_called_instances, FixedArray)
@@ -209,7 +208,6 @@ class WasmInstanceObject : public JSObject {
kDebugInfoIndex,
kTableObjectIndex,
kFunctionTablesIndex,
- kSignatureTablesIndex,
kDirectlyCalledInstancesIndex,
kJsImportsTableIndex,
kFieldCount
@@ -224,7 +222,6 @@ class WasmInstanceObject : public JSObject {
DEF_OFFSET(DebugInfo)
DEF_OFFSET(TableObject)
DEF_OFFSET(FunctionTables)
- DEF_OFFSET(SignatureTables)
DEF_OFFSET(DirectlyCalledInstances)
DEF_OFFSET(JsImportsTable)
@@ -237,13 +234,9 @@ class WasmInstanceObject : public JSObject {
static Handle<WasmInstanceObject> New(Isolate*, Handle<WasmCompiledModule>);
- int32_t GetMemorySize();
-
static int32_t GrowMemory(Isolate*, Handle<WasmInstanceObject>,
uint32_t pages);
- uint32_t GetMaxMemoryPages();
-
// Assumed to be called with a code object associated to a wasm module
// instance. Intended to be called from runtime functions. Returns nullptr on
// failing to get owning instance.
@@ -325,6 +318,77 @@ class WasmSharedModuleData : public FixedArray {
Handle<SeqOneByteString> module_bytes, Handle<Script> script,
Handle<ByteArray> asm_js_offset_table);
+ // Get the module name, if set. Returns an empty handle otherwise.
+ static MaybeHandle<String> GetModuleNameOrNull(Isolate*,
+ Handle<WasmSharedModuleData>);
+
+ // Get the function name of the function identified by the given index.
+ // Returns a null handle if the function is unnamed or the name is not a valid
+ // UTF-8 string.
+ static MaybeHandle<String> GetFunctionNameOrNull(Isolate*,
+ Handle<WasmSharedModuleData>,
+ uint32_t func_index);
+
+ // Get the function name of the function identified by the given index.
+ // Returns "<WASM UNNAMED>" if the function is unnamed or the name is not a
+ // valid UTF-8 string.
+ static Handle<String> GetFunctionName(Isolate*, Handle<WasmSharedModuleData>,
+ uint32_t func_index);
+
+ // Get the raw bytes of the function name of the function identified by the
+ // given index.
+ // Meant to be used for debugging or frame printing.
+ // Does not allocate, hence gc-safe.
+ Vector<const uint8_t> GetRawFunctionName(uint32_t func_index);
+
+ // Return the byte offset of the function identified by the given index.
+ // The offset will be relative to the start of the module bytes.
+ // Returns -1 if the function index is invalid.
+ int GetFunctionOffset(uint32_t func_index);
+
+ // Returns the function containing the given byte offset.
+ // Returns -1 if the byte offset is not contained in any function of this
+ // module.
+ int GetContainingFunction(uint32_t byte_offset);
+
+ // Translate from byte offset in the module to function number and byte offset
+ // within that function, encoded as line and column in the position info.
+ // Returns true if the position is valid inside this module, false otherwise.
+ bool GetPositionInfo(uint32_t position, Script::PositionInfo* info);
+
+ // Get the source position from a given function index and byte offset,
+ // for either asm.js or pure WASM modules.
+ static int GetSourcePosition(Handle<WasmSharedModuleData>,
+ uint32_t func_index, uint32_t byte_offset,
+ bool is_at_number_conversion);
+
+ // Compute the disassembly of a wasm function.
+ // Returns the disassembly string and a list of <byte_offset, line, column>
+ // entries, mapping wasm byte offsets to line and column in the disassembly.
+ // The list is guaranteed to be ordered by the byte_offset.
+ // Returns an empty string and empty vector if the function index is invalid.
+ debug::WasmDisassembly DisassembleFunction(int func_index);
+
+ // Extract a portion of the wire bytes as UTF-8 string.
+ // Returns a null handle if the respective bytes do not form a valid UTF-8
+ // string.
+ static MaybeHandle<String> ExtractUtf8StringFromModuleBytes(
+ Isolate* isolate, Handle<WasmSharedModuleData>, wasm::WireBytesRef ref);
+ static MaybeHandle<String> ExtractUtf8StringFromModuleBytes(
+ Isolate* isolate, Handle<SeqOneByteString> module_bytes,
+ wasm::WireBytesRef ref);
+
+ // Get a list of all possible breakpoints within a given range of this module.
+ bool GetPossibleBreakpoints(const debug::Location& start,
+ const debug::Location& end,
+ std::vector<debug::BreakLocation>* locations);
+
+ // Return an empty handle if no breakpoint is hit at that location, or a
+ // FixedArray with all hit breakpoint objects.
+ static MaybeHandle<FixedArray> CheckBreakPoints(Isolate*,
+ Handle<WasmSharedModuleData>,
+ int position);
+
DECL_OPTIONAL_ACCESSORS(lazy_compilation_orchestrator, Foreign)
};
@@ -360,16 +424,13 @@ class WasmCompiledModule : public FixedArray {
#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID, TYPE_CHECK, SETTER_MODIFIER) \
public: \
- inline Handle<TYPE> NAME() const; \
- inline MaybeHandle<TYPE> maybe_##NAME() const; \
- inline TYPE* maybe_ptr_to_##NAME() const; \
- inline TYPE* ptr_to_##NAME() const; \
+ inline TYPE* maybe_##NAME() const; \
+ inline TYPE* NAME() const; \
inline bool has_##NAME() const; \
inline void reset_##NAME(); \
\
SETTER_MODIFIER: \
- inline void set_##NAME(Handle<TYPE> value); \
- inline void set_ptr_to_##NAME(TYPE* value);
+ inline void set_##NAME(TYPE* value);
#define WCM_OBJECT(TYPE, NAME) \
WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, obj->Is##TYPE(), public)
@@ -392,7 +453,7 @@ class WasmCompiledModule : public FixedArray {
public) \
\
public: \
- inline Handle<TYPE> NAME() const;
+ inline TYPE* NAME() const;
// Add values here if they are required for creating new instances or
// for deserialization, and if they are serializable.
@@ -400,7 +461,7 @@ class WasmCompiledModule : public FixedArray {
// we embed the generated code with a value, then we track that value here.
#define CORE_WCM_PROPERTY_TABLE(MACRO) \
MACRO(WASM_OBJECT, WasmSharedModuleData, shared) \
- MACRO(OBJECT, Context, native_context) \
+ MACRO(WEAK_LINK, Context, native_context) \
MACRO(CONST_OBJECT, FixedArray, export_wrappers) \
MACRO(OBJECT, FixedArray, weak_exported_functions) \
MACRO(WASM_OBJECT, WasmCompiledModule, next_instance) \
@@ -410,7 +471,8 @@ class WasmCompiledModule : public FixedArray {
MACRO(OBJECT, FixedArray, handler_table) \
MACRO(OBJECT, FixedArray, source_positions) \
MACRO(OBJECT, Foreign, native_module) \
- MACRO(OBJECT, FixedArray, lazy_compile_data)
+ MACRO(OBJECT, FixedArray, lazy_compile_data) \
+ MACRO(SMALL_CONST_NUMBER, bool, use_trap_handler)
#define GC_WCM_PROPERTY_TABLE(MACRO) \
MACRO(SMALL_CONST_NUMBER, uint32_t, num_imported_functions) \
@@ -418,8 +480,7 @@ class WasmCompiledModule : public FixedArray {
MACRO(OBJECT, FixedArray, function_tables) \
MACRO(OBJECT, FixedArray, signature_tables) \
MACRO(CONST_OBJECT, FixedArray, empty_function_tables) \
- MACRO(CONST_OBJECT, FixedArray, empty_signature_tables) \
- MACRO(SMALL_CONST_NUMBER, uint32_t, initial_pages)
+ MACRO(CONST_OBJECT, FixedArray, empty_signature_tables)
// TODO(mtrofin): this is unnecessary when we stop needing
// FLAG_wasm_jit_to_native, because we have instance_id on NativeModule.
@@ -449,7 +510,7 @@ class WasmCompiledModule : public FixedArray {
Isolate* isolate, wasm::WasmModule* module, Handle<FixedArray> code_table,
Handle<FixedArray> export_wrappers,
const std::vector<wasm::GlobalHandleAddress>& function_tables,
- const std::vector<wasm::GlobalHandleAddress>& signature_tables);
+ bool use_trap_handler);
static Handle<WasmCompiledModule> Clone(Isolate* isolate,
Handle<WasmCompiledModule> module);
@@ -458,8 +519,6 @@ class WasmCompiledModule : public FixedArray {
// TODO(mtrofin): delete this when we don't need FLAG_wasm_jit_to_native
static void ResetGCModel(Isolate* isolate, WasmCompiledModule* module);
- uint32_t default_mem_size() const;
-
wasm::NativeModule* GetNativeModule() const;
void InsertInChain(WasmModuleObject*);
void RemoveFromChain();
@@ -470,14 +529,6 @@ class WasmCompiledModule : public FixedArray {
#undef DECLARATION
public:
-// Allow to call method on WasmSharedModuleData also on this object.
-#define FORWARD_SHARED(type, name) inline type name();
- FORWARD_SHARED(SeqOneByteString*, module_bytes)
- FORWARD_SHARED(wasm::WasmModule*, module)
- FORWARD_SHARED(Script*, script)
- FORWARD_SHARED(bool, is_asm_js)
-#undef FORWARD_SHARED
-
static bool IsWasmCompiledModule(Object* obj);
void PrintInstancesChain();
@@ -485,73 +536,6 @@ class WasmCompiledModule : public FixedArray {
static void ReinitializeAfterDeserialization(Isolate*,
Handle<WasmCompiledModule>);
- // Get the module name, if set. Returns an empty handle otherwise.
- static MaybeHandle<String> GetModuleNameOrNull(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module);
-
- // Get the function name of the function identified by the given index.
- // Returns a null handle if the function is unnamed or the name is not a valid
- // UTF-8 string.
- static MaybeHandle<String> GetFunctionNameOrNull(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
- uint32_t func_index);
-
- // Get the function name of the function identified by the given index.
- // Returns "<WASM UNNAMED>" if the function is unnamed or the name is not a
- // valid UTF-8 string.
- static Handle<String> GetFunctionName(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
- uint32_t func_index);
-
- // Get the raw bytes of the function name of the function identified by the
- // given index.
- // Meant to be used for debugging or frame printing.
- // Does not allocate, hence gc-safe.
- Vector<const uint8_t> GetRawFunctionName(uint32_t func_index);
-
- // Return the byte offset of the function identified by the given index.
- // The offset will be relative to the start of the module bytes.
- // Returns -1 if the function index is invalid.
- int GetFunctionOffset(uint32_t func_index);
-
- // Returns the function containing the given byte offset.
- // Returns -1 if the byte offset is not contained in any function of this
- // module.
- int GetContainingFunction(uint32_t byte_offset);
-
- // Translate from byte offset in the module to function number and byte offset
- // within that function, encoded as line and column in the position info.
- // Returns true if the position is valid inside this module, false otherwise.
- bool GetPositionInfo(uint32_t position, Script::PositionInfo* info);
-
- // Get the source position from a given function index and byte offset,
- // for either asm.js or pure WASM modules.
- static int GetSourcePosition(Handle<WasmCompiledModule> compiled_module,
- uint32_t func_index, uint32_t byte_offset,
- bool is_at_number_conversion);
-
- // Compute the disassembly of a wasm function.
- // Returns the disassembly string and a list of <byte_offset, line, column>
- // entries, mapping wasm byte offsets to line and column in the disassembly.
- // The list is guaranteed to be ordered by the byte_offset.
- // Returns an empty string and empty vector if the function index is invalid.
- debug::WasmDisassembly DisassembleFunction(int func_index);
-
- // Extract a portion of the wire bytes as UTF-8 string.
- // Returns a null handle if the respective bytes do not form a valid UTF-8
- // string.
- static MaybeHandle<String> ExtractUtf8StringFromModuleBytes(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
- wasm::WireBytesRef ref);
- static MaybeHandle<String> ExtractUtf8StringFromModuleBytes(
- Isolate* isolate, Handle<SeqOneByteString> module_bytes,
- wasm::WireBytesRef ref);
-
- // Get a list of all possible breakpoints within a given range of this module.
- bool GetPossibleBreakpoints(const debug::Location& start,
- const debug::Location& end,
- std::vector<debug::BreakLocation>* locations);
-
// Set a breakpoint on the given byte position inside the given module.
// This will affect all live and future instances of the module.
// The passed position might be modified to point to the next breakable
@@ -561,10 +545,6 @@ class WasmCompiledModule : public FixedArray {
static bool SetBreakPoint(Handle<WasmCompiledModule>, int* position,
Handle<Object> break_point_object);
- // Return an empty handle if no breakpoint is hit at that location, or a
- // FixedArray with all hit breakpoint objects.
- MaybeHandle<FixedArray> CheckBreakPoints(int position);
-
inline void ReplaceCodeTableForTesting(
std::vector<wasm::WasmCode*>&& testing_table);
@@ -645,8 +625,8 @@ class WasmDebugInfo : public FixedArray {
std::vector<std::pair<uint32_t, int>> GetInterpretedStack(
Address frame_pointer);
- std::unique_ptr<wasm::InterpretedFrame> GetInterpretedFrame(
- Address frame_pointer, int frame_index);
+ wasm::WasmInterpreter::FramePtr GetInterpretedFrame(Address frame_pointer,
+ int frame_index);
// Unwind the interpreted stack belonging to the passed interpreter entry
// frame.
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 5188d7801e..b503aa1a5e 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -49,6 +49,9 @@ namespace wasm {
#define CASE_CONVERT_OP(name, RES, SRC, src_suffix, str) \
CASE_##RES##_OP(U##name##SRC, str "_u/" src_suffix) \
CASE_##RES##_OP(S##name##SRC, str "_s/" src_suffix)
+#define CASE_CONVERT_SAT_OP(name, RES, SRC, src_suffix, str) \
+ CASE_##RES##_OP(U##name##Sat##SRC, str "_u:sat/" src_suffix) \
+ CASE_##RES##_OP(S##name##Sat##SRC, str "_s:sat/" src_suffix)
#define CASE_L32_OP(name, str) \
CASE_SIGN_OP(I32, name##8, str "8") \
CASE_SIGN_OP(I32, name##16, str "16") \
@@ -98,6 +101,10 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I32_OP(ConvertI64, "wrap/i64")
CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
+ // TODO(kschimpf): Add I64 versions of saturating conversions.
+ CASE_CONVERT_SAT_OP(Convert, I32, F32, "f32", "trunc")
+ CASE_CONVERT_SAT_OP(Convert, I32, F64, "f64", "trunc")
+
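// Editorial sketch (not part of the patch): CASE_CONVERT_SAT_OP expands through
// the existing CASE_I32_OP helper, so the two invocations above generate names
// such as (assuming CASE_I32_OP prefixes "i32."):
//
//   kExprI32SConvertSatF32 -> "i32.trunc_s:sat/f32"
//   kExprI32UConvertSatF64 -> "i32.trunc_u:sat/f64"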
CASE_CONVERT_OP(Convert, I64, I32, "i32", "extend")
CASE_CONVERT_OP(Convert, F32, I32, "i32", "convert")
CASE_CONVERT_OP(Convert, F32, I64, "i64", "convert")
@@ -275,6 +282,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
#undef CASE_UNSIGNED_OP
#undef CASE_ALL_SIGN_OP
#undef CASE_CONVERT_OP
+#undef CASE_CONVERT_SAT_OP
#undef CASE_L32_OP
#undef CASE_U32_OP
@@ -340,11 +348,12 @@ enum WasmOpcodeSig : byte {
FOREACH_SIGNATURE(DECLARE_SIG_ENUM)
};
#undef DECLARE_SIG_ENUM
-
-#define DECLARE_SIG(name, ...) \
- constexpr ValueType kTypes_##name[] = {__VA_ARGS__}; \
- constexpr FunctionSig kSig_##name( \
- 1, static_cast<int>(arraysize(kTypes_##name)) - 1, kTypes_##name);
+#define DECLARE_SIG(name, ...) \
+ constexpr ValueType kTypes_##name[] = {__VA_ARGS__}; \
+ constexpr int kReturnsCount_##name = kTypes_##name[0] == kWasmStmt ? 0 : 1; \
+ constexpr FunctionSig kSig_##name( \
+ kReturnsCount_##name, static_cast<int>(arraysize(kTypes_##name)) - 1, \
+ kTypes_##name + (1 - kReturnsCount_##name));
FOREACH_SIGNATURE(DECLARE_SIG)
#undef DECLARE_SIG
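// Editorial sketch (not part of the patch): the reworked DECLARE_SIG derives the
// return count from the first listed type, so void signatures carry a leading
// kWasmStmt placeholder that is skipped when building the FunctionSig. A rough
// expansion for the new store signature v_ii versus the unchanged i_ii:
//
//   constexpr ValueType kTypes_v_ii[] = {kWasmStmt, kWasmI32, kWasmI32};
//   constexpr FunctionSig kSig_v_ii(0, 2, kTypes_v_ii + 1);  // no return, 2 params
//
//   constexpr ValueType kTypes_i_ii[] = {kWasmI32, kWasmI32, kWasmI32};
//   constexpr FunctionSig kSig_i_ii(1, 2, kTypes_i_ii);      // i32 return, 2 params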
@@ -376,7 +385,7 @@ struct GetAsmJsOpcodeSigIndex {
struct GetSimdOpcodeSigIndex {
constexpr WasmOpcodeSig operator()(byte opcode) const {
-#define CASE(name, opc, sig) opcode == (opc & 0xff) ? kSigEnum_##sig:
+#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
return FOREACH_SIMD_0_OPERAND_OPCODE(CASE) kSigEnum_None;
#undef CASE
}
@@ -384,12 +393,20 @@ struct GetSimdOpcodeSigIndex {
struct GetAtomicOpcodeSigIndex {
constexpr WasmOpcodeSig operator()(byte opcode) const {
-#define CASE(name, opc, sig) opcode == (opc & 0xff) ? kSigEnum_##sig:
+#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
return FOREACH_ATOMIC_OPCODE(CASE) kSigEnum_None;
#undef CASE
}
};
+struct GetNumericOpcodeSigIndex {
+ constexpr WasmOpcodeSig operator()(byte opcode) const {
+#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
+ return FOREACH_NUMERIC_OPCODE(CASE) kSigEnum_None;
+#undef CASE
+ }
+};
+
constexpr std::array<WasmOpcodeSig, 256> kSimpleExprSigTable =
base::make_array<256>(GetOpcodeSigIndex{});
constexpr std::array<WasmOpcodeSig, 256> kSimpleAsmjsExprSigTable =
@@ -398,20 +415,26 @@ constexpr std::array<WasmOpcodeSig, 256> kSimdExprSigTable =
base::make_array<256>(GetSimdOpcodeSigIndex{});
constexpr std::array<WasmOpcodeSig, 256> kAtomicExprSigTable =
base::make_array<256>(GetAtomicOpcodeSigIndex{});
+constexpr std::array<WasmOpcodeSig, 256> kNumericExprSigTable =
+ base::make_array<256>(GetNumericOpcodeSigIndex{});
} // namespace
FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
- if (opcode >> 8 == kSimdPrefix) {
- return const_cast<FunctionSig*>(
- kSimpleExprSigs[kSimdExprSigTable[opcode & 0xff]]);
- } else if (opcode >> 8 == kAtomicPrefix) {
- return const_cast<FunctionSig*>(
- kSimpleExprSigs[kAtomicExprSigTable[opcode & 0xff]]);
- } else {
- DCHECK_GT(kSimpleExprSigTable.size(), opcode);
- return const_cast<FunctionSig*>(
- kSimpleExprSigs[kSimpleExprSigTable[opcode]]);
+ switch (opcode >> 8) {
+ case kSimdPrefix:
+ return const_cast<FunctionSig*>(
+ kSimpleExprSigs[kSimdExprSigTable[opcode & 0xFF]]);
+ case kAtomicPrefix:
+ return const_cast<FunctionSig*>(
+ kSimpleExprSigs[kAtomicExprSigTable[opcode & 0xFF]]);
+ case kNumericPrefix:
+ return const_cast<FunctionSig*>(
+ kSimpleExprSigs[kNumericExprSigTable[opcode & 0xFF]]);
+ default:
+ DCHECK_GT(kSimpleExprSigTable.size(), opcode);
+ return const_cast<FunctionSig*>(
+ kSimpleExprSigs[kSimpleExprSigTable[opcode]]);
}
}
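// Editorial sketch (illustrative, assuming the usual kExpr naming): with the
// 0xFC prefix wired up, a saturating conversion is looked up through the new
// numeric table instead of the single-byte table:
//
//   WasmOpcode op = kExprI32SConvertSatF32;            // encoded as 0xFC00
//   // (op >> 8) == kNumericPrefix, so Signature(op) returns kSig_i_f:
//   FunctionSig* sig = WasmOpcodes::Signature(op);     // one f32 param -> i32 result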
@@ -421,6 +444,14 @@ FunctionSig* WasmOpcodes::AsmjsSignature(WasmOpcode opcode) {
kSimpleExprSigs[kSimpleAsmjsExprSigTable[opcode]]);
}
+// Define constexpr arrays.
+constexpr uint8_t LoadType::kLoadSizeLog2[];
+constexpr ValueType LoadType::kValueType[];
+constexpr MachineType LoadType::kMemType[];
+constexpr uint8_t StoreType::kStoreSizeLog2[];
+constexpr ValueType StoreType::kValueType[];
+constexpr MachineRepresentation StoreType::kMemRep[];
+
int WasmOpcodes::TrapReasonToMessageId(TrapReason reason) {
switch (reason) {
#define TRAPREASON_TO_MESSAGE(name) \
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index e8cb348b53..9f8232c902 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -9,25 +9,12 @@
#include "src/machine-type.h"
#include "src/runtime/runtime.h"
#include "src/signature.h"
+#include "src/wasm/wasm-constants.h"
namespace v8 {
namespace internal {
namespace wasm {
-// Binary encoding of the module header.
-const uint32_t kWasmMagic = 0x6d736100;
-const uint32_t kWasmVersion = 0x01;
-
-// Binary encoding of local types.
-enum ValueTypeCode : uint8_t {
- kLocalVoid = 0x40,
- kLocalI32 = 0x7f,
- kLocalI64 = 0x7e,
- kLocalF32 = 0x7d,
- kLocalF64 = 0x7c,
- kLocalS128 = 0x7b
-};
-
// We reuse the internal machine type to represent WebAssembly types.
// A typedef improves readability without adding a whole new type system.
using ValueType = MachineRepresentation;
@@ -45,9 +32,6 @@ bool IsJSCompatibleSignature(const FunctionSig* sig);
using WasmName = Vector<const char>;
-using WasmCodePosition = int;
-constexpr WasmCodePosition kNoCodePosition = -1;
-
// Control expressions and blocks.
#define FOREACH_CONTROL_OPCODE(V) \
V(Unreachable, 0x00, _) \
@@ -102,15 +86,15 @@ constexpr WasmCodePosition kNoCodePosition = -1;
// Store memory expressions.
#define FOREACH_STORE_MEM_OPCODE(V) \
- V(I32StoreMem, 0x36, i_ii) \
- V(I64StoreMem, 0x37, l_il) \
- V(F32StoreMem, 0x38, f_if) \
- V(F64StoreMem, 0x39, d_id) \
- V(I32StoreMem8, 0x3a, i_ii) \
- V(I32StoreMem16, 0x3b, i_ii) \
- V(I64StoreMem8, 0x3c, l_il) \
- V(I64StoreMem16, 0x3d, l_il) \
- V(I64StoreMem32, 0x3e, l_il)
+ V(I32StoreMem, 0x36, v_ii) \
+ V(I64StoreMem, 0x37, v_il) \
+ V(F32StoreMem, 0x38, v_if) \
+ V(F64StoreMem, 0x39, v_id) \
+ V(I32StoreMem8, 0x3a, v_ii) \
+ V(I32StoreMem16, 0x3b, v_ii) \
+ V(I64StoreMem8, 0x3c, v_il) \
+ V(I64StoreMem16, 0x3d, v_il) \
+ V(I64StoreMem32, 0x3e, v_il)
// Miscellaneous memory expressions
#define FOREACH_MISC_MEM_OPCODE(V) \
@@ -413,15 +397,22 @@ constexpr WasmCodePosition kNoCodePosition = -1;
#define FOREACH_SIMD_MEM_OPCODE(V) \
V(S128LoadMem, 0xfd80, s_i) \
- V(S128StoreMem, 0xfd81, s_is)
+ V(S128StoreMem, 0xfd81, v_is)
+
+#define FOREACH_NUMERIC_OPCODE(V) \
+ V(I32SConvertSatF32, 0xfc00, i_f) \
+ V(I32UConvertSatF32, 0xfc01, i_f) \
+ V(I32SConvertSatF64, 0xfc02, i_d) \
+ V(I32UConvertSatF64, 0xfc03, i_d)
+// TODO(kschimpf): Add remaining i64 numeric opcodes.
#define FOREACH_ATOMIC_OPCODE(V) \
V(I32AtomicLoad, 0xfe10, i_i) \
V(I32AtomicLoad8U, 0xfe12, i_i) \
V(I32AtomicLoad16U, 0xfe13, i_i) \
- V(I32AtomicStore, 0xfe17, i_ii) \
- V(I32AtomicStore8U, 0xfe19, i_ii) \
- V(I32AtomicStore16U, 0xfe1a, i_ii) \
+ V(I32AtomicStore, 0xfe17, v_ii) \
+ V(I32AtomicStore8U, 0xfe19, v_ii) \
+ V(I32AtomicStore16U, 0xfe1a, v_ii) \
V(I32AtomicAdd, 0xfe1e, i_ii) \
V(I32AtomicAdd8U, 0xfe20, i_ii) \
V(I32AtomicAdd16U, 0xfe21, i_ii) \
@@ -457,38 +448,42 @@ constexpr WasmCodePosition kNoCodePosition = -1;
FOREACH_SIMD_1_OPERAND_OPCODE(V) \
FOREACH_SIMD_MASK_OPERAND_OPCODE(V) \
FOREACH_SIMD_MEM_OPCODE(V) \
- FOREACH_ATOMIC_OPCODE(V)
+ FOREACH_ATOMIC_OPCODE(V) \
+ FOREACH_NUMERIC_OPCODE(V)
// All signatures.
-#define FOREACH_SIGNATURE(V) \
- FOREACH_SIMD_SIGNATURE(V) \
- V(i_ii, kWasmI32, kWasmI32, kWasmI32) \
- V(i_i, kWasmI32, kWasmI32) \
- V(i_v, kWasmI32) \
- V(i_ff, kWasmI32, kWasmF32, kWasmF32) \
- V(i_f, kWasmI32, kWasmF32) \
- V(i_dd, kWasmI32, kWasmF64, kWasmF64) \
- V(i_d, kWasmI32, kWasmF64) \
- V(i_l, kWasmI32, kWasmI64) \
- V(l_ll, kWasmI64, kWasmI64, kWasmI64) \
- V(i_ll, kWasmI32, kWasmI64, kWasmI64) \
- V(l_l, kWasmI64, kWasmI64) \
- V(l_i, kWasmI64, kWasmI32) \
- V(l_f, kWasmI64, kWasmF32) \
- V(l_d, kWasmI64, kWasmF64) \
- V(f_ff, kWasmF32, kWasmF32, kWasmF32) \
- V(f_f, kWasmF32, kWasmF32) \
- V(f_d, kWasmF32, kWasmF64) \
- V(f_i, kWasmF32, kWasmI32) \
- V(f_l, kWasmF32, kWasmI64) \
- V(d_dd, kWasmF64, kWasmF64, kWasmF64) \
- V(d_d, kWasmF64, kWasmF64) \
- V(d_f, kWasmF64, kWasmF32) \
- V(d_i, kWasmF64, kWasmI32) \
- V(d_l, kWasmF64, kWasmI64) \
- V(d_id, kWasmF64, kWasmI32, kWasmF64) \
- V(f_if, kWasmF32, kWasmI32, kWasmF32) \
- V(l_il, kWasmI64, kWasmI32, kWasmI64) \
+#define FOREACH_SIGNATURE(V) \
+ FOREACH_SIMD_SIGNATURE(V) \
+ V(i_ii, kWasmI32, kWasmI32, kWasmI32) \
+ V(i_i, kWasmI32, kWasmI32) \
+ V(i_v, kWasmI32) \
+ V(i_ff, kWasmI32, kWasmF32, kWasmF32) \
+ V(i_f, kWasmI32, kWasmF32) \
+ V(i_dd, kWasmI32, kWasmF64, kWasmF64) \
+ V(i_d, kWasmI32, kWasmF64) \
+ V(i_l, kWasmI32, kWasmI64) \
+ V(l_ll, kWasmI64, kWasmI64, kWasmI64) \
+ V(i_ll, kWasmI32, kWasmI64, kWasmI64) \
+ V(l_l, kWasmI64, kWasmI64) \
+ V(l_i, kWasmI64, kWasmI32) \
+ V(l_f, kWasmI64, kWasmF32) \
+ V(l_d, kWasmI64, kWasmF64) \
+ V(f_ff, kWasmF32, kWasmF32, kWasmF32) \
+ V(f_f, kWasmF32, kWasmF32) \
+ V(f_d, kWasmF32, kWasmF64) \
+ V(f_i, kWasmF32, kWasmI32) \
+ V(f_l, kWasmF32, kWasmI64) \
+ V(d_dd, kWasmF64, kWasmF64, kWasmF64) \
+ V(d_d, kWasmF64, kWasmF64) \
+ V(d_f, kWasmF64, kWasmF32) \
+ V(d_i, kWasmF64, kWasmI32) \
+ V(d_l, kWasmF64, kWasmI64) \
+ V(v_ii, kWasmStmt, kWasmI32, kWasmI32) \
+ V(v_id, kWasmStmt, kWasmI32, kWasmF64) \
+ V(d_id, kWasmF64, kWasmI32, kWasmF64) \
+ V(v_if, kWasmStmt, kWasmI32, kWasmF32) \
+ V(f_if, kWasmF32, kWasmI32, kWasmF32) \
+  V(v_il, kWasmStmt, kWasmI32, kWasmI64)                    \
+  V(l_il, kWasmI64, kWasmI32, kWasmI64)                     \
V(i_iii, kWasmI32, kWasmI32, kWasmI32, kWasmI32)
#define FOREACH_SIMD_SIGNATURE(V) \
@@ -501,6 +496,7 @@ constexpr WasmCodePosition kNoCodePosition = -1;
V(s_sss, kWasmS128, kWasmS128, kWasmS128, kWasmS128)
#define FOREACH_PREFIX(V) \
+ V(Numeric, 0xfc) \
V(Simd, 0xfd) \
V(Atomic, 0xfe)
@@ -532,6 +528,117 @@ enum TrapReason {
#undef DECLARE_ENUM
};
+// TODO(clemensh): Compute memtype and size from ValueType once we have c++14
+// constexpr support.
+#define FOREACH_LOAD_TYPE(V) \
+ V(I32, , Int32, 2) \
+ V(I32, 8S, Int8, 0) \
+ V(I32, 8U, Uint8, 0) \
+ V(I32, 16S, Int16, 1) \
+ V(I32, 16U, Uint16, 1) \
+ V(I64, , Int64, 3) \
+ V(I64, 8S, Int8, 0) \
+ V(I64, 8U, Uint8, 0) \
+ V(I64, 16S, Int16, 1) \
+ V(I64, 16U, Uint16, 1) \
+ V(I64, 32S, Int32, 2) \
+ V(I64, 32U, Uint32, 2) \
+ V(F32, , Float32, 2) \
+ V(F64, , Float64, 3) \
+ V(S128, , Simd128, 4)
+
+class LoadType {
+ public:
+ enum LoadTypeValue : uint8_t {
+#define DEF_ENUM(type, suffix, ...) k##type##Load##suffix,
+ FOREACH_LOAD_TYPE(DEF_ENUM)
+#undef DEF_ENUM
+ };
+
+  // Allow implicit conversion of the enum value to this wrapper.
+ constexpr LoadType(LoadTypeValue val) // NOLINT(runtime/explicit)
+ : val_(val) {}
+
+ constexpr LoadTypeValue value() const { return val_; }
+ constexpr unsigned size_log_2() const { return kLoadSizeLog2[val_]; }
+ constexpr unsigned size() const { return 1 << size_log_2(); }
+ constexpr ValueType value_type() const { return kValueType[val_]; }
+ constexpr MachineType mem_type() const { return kMemType[val_]; }
+
+ private:
+ const LoadTypeValue val_;
+
+ static constexpr uint8_t kLoadSizeLog2[] = {
+#define LOAD_SIZE(_, __, ___, size) size,
+ FOREACH_LOAD_TYPE(LOAD_SIZE)
+#undef LOAD_SIZE
+ };
+
+ static constexpr ValueType kValueType[] = {
+#define VALUE_TYPE(type, ...) kWasm##type,
+ FOREACH_LOAD_TYPE(VALUE_TYPE)
+#undef VALUE_TYPE
+ };
+
+ static constexpr MachineType kMemType[] = {
+#define MEMTYPE(_, __, memtype, ___) MachineType::memtype(),
+ FOREACH_LOAD_TYPE(MEMTYPE)
+#undef MEMTYPE
+ };
+};
+
+#define FOREACH_STORE_TYPE(V) \
+ V(I32, , Word32, 2) \
+ V(I32, 8, Word8, 0) \
+ V(I32, 16, Word16, 1) \
+ V(I64, , Word64, 3) \
+ V(I64, 8, Word8, 0) \
+ V(I64, 16, Word16, 1) \
+ V(I64, 32, Word32, 2) \
+ V(F32, , Float32, 2) \
+ V(F64, , Float64, 3) \
+ V(S128, , Simd128, 4)
+
+class StoreType {
+ public:
+ enum StoreTypeValue : uint8_t {
+#define DEF_ENUM(type, suffix, ...) k##type##Store##suffix,
+ FOREACH_STORE_TYPE(DEF_ENUM)
+#undef DEF_ENUM
+ };
+
+  // Allow implicit conversion of the enum value to this wrapper.
+ constexpr StoreType(StoreTypeValue val) // NOLINT(runtime/explicit)
+ : val_(val) {}
+
+ constexpr StoreTypeValue value() const { return val_; }
+ constexpr unsigned size_log_2() const { return kStoreSizeLog2[val_]; }
+ constexpr unsigned size() const { return 1 << size_log_2(); }
+ constexpr ValueType value_type() const { return kValueType[val_]; }
+  constexpr MachineRepresentation mem_rep() const { return kMemRep[val_]; }
+
+ private:
+ const StoreTypeValue val_;
+
+ static constexpr uint8_t kStoreSizeLog2[] = {
+#define STORE_SIZE(_, __, ___, size) size,
+ FOREACH_STORE_TYPE(STORE_SIZE)
+#undef STORE_SIZE
+ };
+
+ static constexpr ValueType kValueType[] = {
+#define VALUE_TYPE(type, ...) kWasm##type,
+ FOREACH_STORE_TYPE(VALUE_TYPE)
+#undef VALUE_TYPE
+ };
+
+ static constexpr MachineRepresentation kMemRep[] = {
+#define MEMREP(_, __, memrep, ___) MachineRepresentation::k##memrep,
+ FOREACH_STORE_TYPE(MEMREP)
+#undef MEMREP
+ };
+};
+
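// Editorial usage sketch for the new wrappers (illustrative only): the
// per-type tables let memory operations query their width and machine type
// without a switch statement.
//
//   constexpr LoadType load = LoadType::kI64Load32S;
//   static_assert(load.size() == 4, "32-bit narrow load");
//   // load.value_type() == kWasmI64, load.mem_type() == MachineType::Int32()
//
//   constexpr StoreType store = StoreType::kI32Store16;
//   static_assert(store.size_log_2() == 1, "16-bit store");
//   // store.mem_rep() == MachineRepresentation::kWord16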
// A collection of opcode-related static methods.
class V8_EXPORT_PRIVATE WasmOpcodes {
public:
@@ -548,7 +655,7 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
static const char* TrapReasonMessage(TrapReason reason);
static byte MemSize(MachineType type) {
- return 1 << ElementSizeLog2Of(type.representation());
+ return MemSize(type.representation());
}
static byte MemSize(ValueType type) { return 1 << ElementSizeLog2Of(type); }
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 337692b595..4466672f37 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -9,10 +9,12 @@
#include "src/external-reference-table.h"
#include "src/objects-inl.h"
#include "src/objects.h"
+#include "src/snapshot/code-serializer.h"
#include "src/snapshot/serializer-common.h"
#include "src/version.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-objects.h"
@@ -21,20 +23,7 @@
namespace v8 {
namespace internal {
namespace wasm {
-
namespace {
-void SetRawTargetData(RelocInfo* rinfo, uint32_t value) {
- if (rinfo->target_address_size() == sizeof(uint32_t)) {
- *(reinterpret_cast<uint32_t*>(rinfo->target_address_address())) = value;
- return;
- } else {
- DCHECK_EQ(rinfo->target_address_size(), sizeof(intptr_t));
- DCHECK_EQ(rinfo->target_address_size(), 8);
- *(reinterpret_cast<intptr_t*>(rinfo->target_address_address())) =
- static_cast<intptr_t>(value);
- return;
- }
-}
class Writer {
public:
@@ -108,31 +97,91 @@ class Reader {
Vector<const byte> buffer_;
};
-} // namespace
+constexpr size_t kVersionSize = 4 * sizeof(uint32_t);
-size_t WasmSerializedFormatVersion::GetVersionSize() { return kVersionSize; }
-
-bool WasmSerializedFormatVersion::WriteVersion(Isolate* isolate,
- Vector<byte> buffer) {
- if (buffer.size() < GetVersionSize()) return false;
+void WriteVersion(Isolate* isolate, Vector<byte> buffer) {
+ DCHECK_GE(buffer.size(), kVersionSize);
Writer writer(buffer);
writer.Write(SerializedData::ComputeMagicNumber(
ExternalReferenceTable::instance(isolate)));
writer.Write(Version::Hash());
writer.Write(static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
writer.Write(FlagList::Hash());
- return true;
}
-bool WasmSerializedFormatVersion::IsSupportedVersion(
- Isolate* isolate, const Vector<const byte> buffer) {
+bool IsSupportedVersion(Isolate* isolate, const Vector<const byte> buffer) {
if (buffer.size() < kVersionSize) return false;
byte version[kVersionSize];
- CHECK(WriteVersion(isolate, {version, kVersionSize}));
+ WriteVersion(isolate, {version, kVersionSize});
if (memcmp(buffer.start(), version, kVersionSize) == 0) return true;
return false;
}
+} // namespace
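// Editorial note (layout inferred from WriteVersion above): the 16-byte version
// header is four uint32 fields, and IsSupportedVersion accepts a buffer only if
// it matches a freshly generated local header byte-for-byte:
//
//   [ 0.. 3]  serializer magic number (external reference table)
//   [ 4.. 7]  Version::Hash()
//   [ 8..11]  CpuFeatures::SupportedFeatures()
//   [12..15]  FlagList::Hash()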
+
+enum SerializationSection { Init, Metadata, Stubs, CodeSection, Done };
+
+class V8_EXPORT_PRIVATE NativeModuleSerializer {
+ public:
+ explicit NativeModuleSerializer(Isolate*, const NativeModule*);
+ size_t Measure() const;
+ size_t Write(Vector<byte>);
+ bool IsDone() const { return state_ == Done; }
+
+ private:
+ size_t MeasureHeader() const;
+ static size_t GetCodeHeaderSize();
+ size_t MeasureCode(const WasmCode*) const;
+ size_t MeasureCopiedStubs() const;
+ FixedArray* GetHandlerTable(const WasmCode*) const;
+ ByteArray* GetSourcePositions(const WasmCode*) const;
+
+ void BufferHeader();
+ // we buffer all the stubs because they are small
+ void BufferCopiedStubs();
+ void BufferCodeInAllocatedScratch(const WasmCode*);
+ void BufferCurrentWasmCode();
+ size_t DrainBuffer(Vector<byte> dest);
+ uint32_t EncodeBuiltinOrStub(Address);
+
+ Isolate* const isolate_ = nullptr;
+ const NativeModule* const native_module_ = nullptr;
+ SerializationSection state_ = Init;
+ uint32_t index_ = 0;
+ std::vector<byte> scratch_;
+ Vector<byte> remaining_;
+ // wasm and copied stubs reverse lookup
+ std::map<Address, uint32_t> wasm_targets_lookup_;
+ // immovable builtins and runtime entries lookup
+ std::map<Address, uint32_t> reference_table_lookup_;
+ std::map<Address, uint32_t> stub_lookup_;
+ std::map<Address, uint32_t> builtin_lookup_;
+};
+
+class V8_EXPORT_PRIVATE NativeModuleDeserializer {
+ public:
+ explicit NativeModuleDeserializer(Isolate*, NativeModule*);
+  // Currently, we don't support streamed reading, even though the
+  // API suggests it.
+ bool Read(Vector<const byte>);
+
+ private:
+ void ExpectHeader();
+ void Expect(size_t size);
+ bool ReadHeader();
+ bool ReadCode();
+ bool ReadStubs();
+ Address GetTrampolineOrStubFromTag(uint32_t);
+
+ Isolate* const isolate_ = nullptr;
+ NativeModule* const native_module_ = nullptr;
+ std::vector<byte> scratch_;
+ std::vector<Address> stubs_;
+ Vector<const byte> unread_;
+ size_t current_expectation_ = 0;
+ uint32_t index_ = 0;
+};
+
NativeModuleSerializer::NativeModuleSerializer(Isolate* isolate,
const NativeModule* module)
: isolate_(isolate), native_module_(module) {
@@ -164,8 +213,8 @@ size_t NativeModuleSerializer::MeasureHeader() const {
sizeof(
uint32_t) + // imported fcts - i.e. index of first wasm function
sizeof(uint32_t) + // table count
- native_module_->specialization_data_.function_tables.size() *
- 2 // 2 same-sized tables, containing pointers
+ native_module_->specialization_data_.function_tables.size()
+ // function table, containing pointers
* sizeof(GlobalHandleAddress);
}
@@ -182,7 +231,6 @@ void NativeModuleSerializer::BufferHeader() {
e = native_module_->specialization_data_.function_tables.size();
i < e; ++i) {
writer.Write(native_module_->specialization_data_.function_tables[i]);
- writer.Write(native_module_->specialization_data_.signature_tables[i]);
}
}
@@ -279,7 +327,7 @@ void NativeModuleSerializer::BufferCopiedStubs() {
FixedArray* NativeModuleSerializer::GetHandlerTable(
const WasmCode* code) const {
- if (code->kind() != WasmCode::Function) return nullptr;
+ if (code->kind() != WasmCode::kFunction) return nullptr;
uint32_t index = code->index();
// We write the address, the size, and then copy the code as-is, followed
// by reloc info, followed by handler table and source positions.
@@ -294,7 +342,7 @@ FixedArray* NativeModuleSerializer::GetHandlerTable(
ByteArray* NativeModuleSerializer::GetSourcePositions(
const WasmCode* code) const {
- if (code->kind() != WasmCode::Function) return nullptr;
+ if (code->kind() != WasmCode::kFunction) return nullptr;
uint32_t index = code->index();
Object* source_positions_entry =
native_module_->compiled_module()->source_positions()->get(
@@ -372,17 +420,17 @@ void NativeModuleSerializer::BufferCodeInAllocatedScratch(
case RelocInfo::CODE_TARGET: {
Address orig_target = orig_iter.rinfo()->target_address();
uint32_t tag = EncodeBuiltinOrStub(orig_target);
- SetRawTargetData(iter.rinfo(), tag);
+ SetWasmCalleeTag(iter.rinfo(), tag);
} break;
case RelocInfo::WASM_CALL: {
Address orig_target = orig_iter.rinfo()->wasm_call_address();
uint32_t tag = wasm_targets_lookup_[orig_target];
- SetRawTargetData(iter.rinfo(), tag);
+ SetWasmCalleeTag(iter.rinfo(), tag);
} break;
case RelocInfo::RUNTIME_ENTRY: {
Address orig_target = orig_iter.rinfo()->target_address();
uint32_t tag = reference_table_lookup_[orig_target];
- SetRawTargetData(iter.rinfo(), tag);
+ SetWasmCalleeTag(iter.rinfo(), tag);
} break;
default:
UNREACHABLE();
@@ -402,7 +450,7 @@ uint32_t NativeModuleSerializer::EncodeBuiltinOrStub(Address address) {
DCHECK(stub_iter != stub_lookup_.end());
uint32_t id = stub_iter->second;
DCHECK_LT(id, std::numeric_limits<uint16_t>::max());
- tag = id & 0x0000ffff;
+ tag = id & 0x0000FFFF;
}
return tag;
}
@@ -423,15 +471,20 @@ size_t NativeModuleSerializer::Write(Vector<byte> dest) {
dest = dest + DrainBuffer(dest);
if (remaining_.size() == 0) {
index_ = native_module_->num_imported_functions();
- BufferCurrentWasmCode();
- state_ = CodeSection;
+ if (index_ < native_module_->FunctionCount()) {
+ BufferCurrentWasmCode();
+ state_ = CodeSection;
+ } else {
+ state_ = Done;
+ }
}
break;
}
case CodeSection: {
dest = dest + DrainBuffer(dest);
if (remaining_.size() == 0) {
- if (++index_ < native_module_->FunctionCount()) {
+ ++index_; // Move to next code object.
+ if (index_ < native_module_->FunctionCount()) {
BufferCurrentWasmCode();
} else {
state_ = Done;
@@ -448,18 +501,22 @@ size_t NativeModuleSerializer::Write(Vector<byte> dest) {
}
// static
-std::pair<std::unique_ptr<byte[]>, size_t>
-NativeModuleSerializer::SerializeWholeModule(
+std::pair<std::unique_ptr<const byte[]>, size_t> SerializeNativeModule(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
+ if (!FLAG_wasm_jit_to_native) {
+ std::unique_ptr<ScriptData> script_data =
+ WasmCompiledModuleSerializer::SerializeWasmModule(isolate,
+ compiled_module);
+ script_data->ReleaseDataOwnership();
+ size_t size = static_cast<size_t>(script_data->length());
+ return {std::unique_ptr<const byte[]>(script_data->data()), size};
+ }
NativeModule* native_module = compiled_module->GetNativeModule();
NativeModuleSerializer serializer(isolate, native_module);
- size_t version_size = WasmSerializedFormatVersion::GetVersionSize();
+ size_t version_size = kVersionSize;
size_t buff_size = serializer.Measure() + version_size;
std::unique_ptr<byte[]> ret(new byte[buff_size]);
- if (!WasmSerializedFormatVersion::WriteVersion(isolate,
- {ret.get(), buff_size})) {
- return {};
- }
+ WriteVersion(isolate, {ret.get(), buff_size});
size_t written =
serializer.Write({ret.get() + version_size, buff_size - version_size});
@@ -500,18 +557,14 @@ bool NativeModuleDeserializer::ReadHeader() {
if (!ok) return false;
size_t table_count = reader.Read<uint32_t>();
- std::vector<GlobalHandleAddress> sigs(table_count);
std::vector<GlobalHandleAddress> funcs(table_count);
for (size_t i = 0; i < table_count; ++i) {
funcs[i] = reader.Read<GlobalHandleAddress>();
- sigs[i] = reader.Read<GlobalHandleAddress>();
}
- native_module_->signature_tables() = sigs;
native_module_->function_tables() = funcs;
// resize, so that from here on the native module can be
// asked about num_function_tables().
native_module_->empty_function_tables().resize(table_count);
- native_module_->empty_signature_tables().resize(table_count);
unread_ = unread_ + (start_size - reader.current_buffer().size());
return true;
@@ -558,7 +611,7 @@ bool NativeModuleDeserializer::ReadCode() {
}
WasmCode* ret = native_module_->AddOwnedCode(
code_buffer, std::move(reloc_info), reloc_size, Just(index_),
- WasmCode::Function, constant_pool_offset, stack_slot_count,
+ WasmCode::kFunction, constant_pool_offset, stack_slot_count,
safepoint_table_offset, protected_instructions, is_liftoff);
if (ret == nullptr) return false;
native_module_->SetCodeTable(index_, ret);
@@ -576,14 +629,15 @@ bool NativeModuleDeserializer::ReadCode() {
// We only expect {undefined}. We check for that when we add code.
iter.rinfo()->set_target_object(isolate_->heap()->undefined_value(),
SKIP_WRITE_BARRIER);
+ break;
}
case RelocInfo::CODE_TARGET: {
- uint32_t tag = *(reinterpret_cast<uint32_t*>(
- iter.rinfo()->target_address_address()));
+ uint32_t tag = GetWasmCalleeTag(iter.rinfo());
Address target = GetTrampolineOrStubFromTag(tag);
iter.rinfo()->set_target_address(nullptr, target, SKIP_WRITE_BARRIER,
SKIP_ICACHE_FLUSH);
- } break;
+ break;
+ }
case RelocInfo::RUNTIME_ENTRY: {
uint32_t orig_target = static_cast<uint32_t>(
reinterpret_cast<intptr_t>(iter.rinfo()->target_address()));
@@ -591,7 +645,8 @@ bool NativeModuleDeserializer::ReadCode() {
ExternalReferenceTable::instance(isolate_)->address(orig_target);
iter.rinfo()->set_target_runtime_entry(
nullptr, address, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
- } break;
+ break;
+ }
default:
break;
}
@@ -624,25 +679,35 @@ bool NativeModuleDeserializer::ReadCode() {
}
Address NativeModuleDeserializer::GetTrampolineOrStubFromTag(uint32_t tag) {
- if ((tag & 0x0000ffff) == 0) {
+ if ((tag & 0x0000FFFF) == 0) {
int builtin_id = static_cast<int>(tag >> 16);
v8::internal::Code* builtin = isolate_->builtins()->builtin(builtin_id);
return native_module_->GetLocalAddressFor(handle(builtin));
} else {
- DCHECK_EQ(tag & 0xffff0000, 0);
+ DCHECK_EQ(tag & 0xFFFF0000, 0);
return stubs_[tag];
}
}
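// Editorial sketch of the tag scheme shared by EncodeBuiltinOrStub and
// GetTrampolineOrStubFromTag (example values are made up): builtins put their
// id in the upper half word, copied stubs their index in the lower half word:
//
//   tag 0x002A0000  ->  low 16 bits zero   ->  builtin id 0x2A
//   tag 0x00000007  ->  high 16 bits zero  ->  stubs_[7]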
-MaybeHandle<WasmCompiledModule> NativeModuleDeserializer::DeserializeFullBuffer(
+MaybeHandle<WasmCompiledModule> DeserializeNativeModule(
Isolate* isolate, Vector<const byte> data, Vector<const byte> wire_bytes) {
+ if (!FLAG_wasm_jit_to_native) {
+ ScriptData script_data(data.start(), data.length());
+ Handle<FixedArray> compiled_module;
+ if (!WasmCompiledModuleSerializer::DeserializeWasmModule(
+ isolate, &script_data, wire_bytes)
+ .ToHandle(&compiled_module)) {
+ return {};
+ }
+ return Handle<WasmCompiledModule>::cast(compiled_module);
+ }
if (!IsWasmCodegenAllowed(isolate, isolate->native_context())) {
return {};
}
- if (!WasmSerializedFormatVersion::IsSupportedVersion(isolate, data)) {
+ if (!IsSupportedVersion(isolate, data)) {
return {};
}
- data = data + WasmSerializedFormatVersion::GetVersionSize();
+ data = data + kVersionSize;
ModuleResult decode_result =
SyncDecodeWasmModule(isolate, wire_bytes.start(), wire_bytes.end(), false,
i::wasm::kWasmOrigin);
@@ -669,9 +734,11 @@ MaybeHandle<WasmCompiledModule> NativeModuleDeserializer::DeserializeFullBuffer(
static_cast<int>(export_wrappers_size), TENURED);
Handle<WasmCompiledModule> compiled_module = WasmCompiledModule::New(
- isolate, shared->module(), isolate->factory()->NewFixedArray(0, TENURED),
- export_wrappers, {}, {});
+ isolate, shared->module(), isolate->factory()->empty_fixed_array(),
+ export_wrappers, std::vector<wasm::GlobalHandleAddress>(),
+ trap_handler::IsTrapHandlerEnabled());
compiled_module->OnWasmModuleDecodingComplete(shared);
+ script->set_wasm_compiled_module(*compiled_module);
NativeModuleDeserializer deserializer(isolate,
compiled_module->GetNativeModule());
if (!deserializer.Read(data)) return {};
diff --git a/deps/v8/src/wasm/wasm-serialization.h b/deps/v8/src/wasm/wasm-serialization.h
index 40025c23cf..9c0e9ce10a 100644
--- a/deps/v8/src/wasm/wasm-serialization.h
+++ b/deps/v8/src/wasm/wasm-serialization.h
@@ -5,89 +5,17 @@
#ifndef V8_WASM_SERIALIZATION_H_
#define V8_WASM_SERIALIZATION_H_
-#include "src/wasm/wasm-heap.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
namespace wasm {
-class WasmSerializedFormatVersion {
- public:
- static size_t GetVersionSize();
- static bool WriteVersion(Isolate* isolate, Vector<byte>);
- static bool IsSupportedVersion(Isolate* isolate, const Vector<const byte>);
+std::pair<std::unique_ptr<const byte[]>, size_t> SerializeNativeModule(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module);
- private:
- static constexpr size_t kVersionSize = 4 * sizeof(uint32_t);
-};
-
-enum SerializationSection { Init, Metadata, Stubs, CodeSection, Done };
-
-class V8_EXPORT_PRIVATE NativeModuleSerializer {
- public:
- explicit NativeModuleSerializer(Isolate*, const NativeModule*);
- size_t Measure() const;
- size_t Write(Vector<byte>);
- bool IsDone() const { return state_ == Done; }
- static std::pair<std::unique_ptr<byte[]>, size_t> SerializeWholeModule(
- Isolate*, Handle<WasmCompiledModule>);
-
- private:
- size_t MeasureHeader() const;
- static size_t GetCodeHeaderSize();
- size_t MeasureCode(const WasmCode*) const;
- size_t MeasureCopiedStubs() const;
- FixedArray* GetHandlerTable(const WasmCode*) const;
- ByteArray* GetSourcePositions(const WasmCode*) const;
-
- void BufferHeader();
- // we buffer all the stubs because they are small
- void BufferCopiedStubs();
- void BufferCodeInAllocatedScratch(const WasmCode*);
- void BufferCurrentWasmCode();
- size_t DrainBuffer(Vector<byte> dest);
- uint32_t EncodeBuiltinOrStub(Address);
-
- Isolate* const isolate_ = nullptr;
- const NativeModule* const native_module_ = nullptr;
- SerializationSection state_ = Init;
- uint32_t index_ = 0;
- std::vector<byte> scratch_;
- Vector<byte> remaining_;
- // wasm and copied stubs reverse lookup
- std::map<Address, uint32_t> wasm_targets_lookup_;
- // immovable builtins and runtime entries lookup
- std::map<Address, uint32_t> reference_table_lookup_;
- std::map<Address, uint32_t> stub_lookup_;
- std::map<Address, uint32_t> builtin_lookup_;
-};
-
-class V8_EXPORT_PRIVATE NativeModuleDeserializer {
- public:
- explicit NativeModuleDeserializer(Isolate*, NativeModule*);
- // Currently, we don't support streamed reading, yet albeit the
- // API suggests that.
- bool Read(Vector<const byte>);
- static MaybeHandle<WasmCompiledModule> DeserializeFullBuffer(
- Isolate*, Vector<const byte> data, Vector<const byte> wire_bytes);
-
- private:
- void ExpectHeader();
- void Expect(size_t size);
- bool ReadHeader();
- bool ReadCode();
- bool ReadStubs();
- Address GetTrampolineOrStubFromTag(uint32_t);
-
- Isolate* const isolate_ = nullptr;
- NativeModule* const native_module_ = nullptr;
- std::vector<byte> scratch_;
- std::vector<Address> stubs_;
- Vector<const byte> unread_;
- size_t current_expectation_ = 0;
- uint32_t index_ = 0;
-};
+MaybeHandle<WasmCompiledModule> DeserializeNativeModule(
+ Isolate* isolate, Vector<const byte> data, Vector<const byte> wire_bytes);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 8b12b0867e..a75a8ddd74 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -279,18 +279,6 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
}
}
-Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- return target_address_at(pc, constant_pool);
-}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : nullptr;
- set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
-}
-
void Assembler::deserialization_set_target_internal_reference_at(
Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
@@ -303,7 +291,8 @@ Address Assembler::target_address_from_return_address(Address pc) {
void Assembler::deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code, Address target) {
- set_target_address_at(isolate, instruction_payload, code, target);
+ set_target_address_at(isolate, instruction_payload,
+ code ? code->constant_pool() : nullptr, target);
}
Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
@@ -330,7 +319,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
- return Assembler::target_address_at(pc_, host_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
@@ -421,7 +410,7 @@ void RelocInfo::WipeOut(Isolate* isolate) {
Memory::Address_at(pc_) = nullptr;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(isolate, pc_, host_,
+ Assembler::set_target_address_at(isolate, pc_, constant_pool_,
pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 5f62e2af66..38cbfc78d9 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -38,7 +38,7 @@ V8_INLINE uint64_t _xgetbv(unsigned int xcr) {
// directly because older assemblers do not include support for xgetbv and
// there is no easy way to conditionally compile based on the assembler
// used.
- __asm__ volatile(".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c"(xcr));
+ __asm__ volatile(".byte 0x0F, 0x01, 0xD0" : "=a"(eax), "=d"(edx) : "c"(xcr));
return static_cast<uint64_t>(eax) | (static_cast<uint64_t>(edx) << 32);
}
@@ -243,17 +243,17 @@ Operand::Operand(const Operand& operand, int32_t offset) {
rex_ = operand.rex_;
if (!is_int8(disp_value) || is_baseless) {
// Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
- buf_[0] = (modrm & 0x3f) | (is_baseless ? 0x00 : 0x80);
+ buf_[0] = (modrm & 0x3F) | (is_baseless ? 0x00 : 0x80);
len_ = disp_offset + 4;
Memory::int32_at(&buf_[disp_offset]) = disp_value;
} else if (disp_value != 0 || (base_reg == 0x05)) {
// Need 8 bits of displacement.
- buf_[0] = (modrm & 0x3f) | 0x40; // Mode 1.
+ buf_[0] = (modrm & 0x3F) | 0x40; // Mode 1.
len_ = disp_offset + 1;
buf_[disp_offset] = static_cast<byte>(disp_value);
} else {
// Need no displacement.
- buf_[0] = (modrm & 0x3f); // Mode 0.
+ buf_[0] = (modrm & 0x3F); // Mode 0.
len_ = disp_offset;
}
if (has_sib) {
@@ -385,7 +385,7 @@ bool Assembler::IsNop(Address addr) {
Address a = addr;
while (*a == 0x66) a++;
if (*a == 0x90) return true;
- if (a[0] == 0xf && a[1] == 0x1f) return true;
+ if (a[0] == 0xF && a[1] == 0x1F) return true;
return false;
}
@@ -1044,7 +1044,7 @@ void Assembler::cmovq(Condition cc, Register dst, Register src) {
EnsureSpace ensure_space(this);
// Opcode: REX.W 0f 40 + cc /r.
emit_rex_64(dst, src);
- emit(0x0f);
+ emit(0x0F);
emit(0x40 + cc);
emit_modrm(dst, src);
}
@@ -1060,7 +1060,7 @@ void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
// Opcode: REX.W 0f 40 + cc /r.
emit_rex_64(dst, src);
- emit(0x0f);
+ emit(0x0F);
emit(0x40 + cc);
emit_operand(dst, src);
}
@@ -1076,7 +1076,7 @@ void Assembler::cmovl(Condition cc, Register dst, Register src) {
EnsureSpace ensure_space(this);
// Opcode: 0f 40 + cc /r.
emit_optional_rex_32(dst, src);
- emit(0x0f);
+ emit(0x0F);
emit(0x40 + cc);
emit_modrm(dst, src);
}
@@ -1092,7 +1092,7 @@ void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
// Opcode: 0f 40 + cc /r.
emit_optional_rex_32(dst, src);
- emit(0x0f);
+ emit(0x0F);
emit(0x40 + cc);
emit_operand(dst, src);
}
@@ -1101,13 +1101,13 @@ void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
void Assembler::cmpb_al(Immediate imm8) {
DCHECK(is_int8(imm8.value_) || is_uint8(imm8.value_));
EnsureSpace ensure_space(this);
- emit(0x3c);
+ emit(0x3C);
emit(imm8.value_);
}
void Assembler::lock() {
EnsureSpace ensure_space(this);
- emit(0xf0);
+ emit(0xF0);
}
void Assembler::cmpxchgb(const Operand& dst, Register src) {
@@ -1118,8 +1118,8 @@ void Assembler::cmpxchgb(const Operand& dst, Register src) {
} else {
emit_optional_rex_32(src, dst);
}
- emit(0x0f);
- emit(0xb0);
+ emit(0x0F);
+ emit(0xB0);
emit_operand(src, dst);
}
@@ -1127,19 +1127,26 @@ void Assembler::cmpxchgw(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(src, dst);
- emit(0x0f);
- emit(0xb1);
+ emit(0x0F);
+ emit(0xB1);
emit_operand(src, dst);
}
void Assembler::emit_cmpxchg(const Operand& dst, Register src, int size) {
EnsureSpace ensure_space(this);
emit_rex(src, dst, size);
- emit(0x0f);
- emit(0xb1);
+ emit(0x0F);
+ emit(0xB1);
emit_operand(src, dst);
}
+void Assembler::lfence() {
+ EnsureSpace ensure_space(this);
+ emit(0x0F);
+ emit(0xAE);
+ emit(0xE8);
+}
+
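// Editorial note (not part of the patch): lfence is encoded as 0F AE E8 and
// pause (added further down) as F3 90. A hedged usage sketch, assuming the
// usual "__ masm()->" shorthand: lfence can serve as a speculation barrier
// after a bounds check, pause as a spin-wait hint.
//
//   __ lfence();  // complete pending loads before continuing
//   __ pause();   // tell the core this is a busy-wait loop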
void Assembler::cpuid() {
EnsureSpace ensure_space(this);
emit(0x0F);
@@ -1593,7 +1600,7 @@ void Assembler::movw(const Operand& dst, Immediate imm) {
emit_optional_rex_32(dst);
emit(0xC7);
emit_operand(0x0, dst);
- emit(static_cast<byte>(imm.value_ & 0xff));
+ emit(static_cast<byte>(imm.value_ & 0xFF));
emit(static_cast<byte>(imm.value_ >> 8));
}
@@ -1950,28 +1957,28 @@ void Assembler::Nop(int n) {
emit(0x90);
return;
case 3:
- emit(0x0f);
- emit(0x1f);
+ emit(0x0F);
+ emit(0x1F);
emit(0x00);
return;
case 4:
- emit(0x0f);
- emit(0x1f);
+ emit(0x0F);
+ emit(0x1F);
emit(0x40);
emit(0x00);
return;
case 6:
emit(0x66);
case 5:
- emit(0x0f);
- emit(0x1f);
+ emit(0x0F);
+ emit(0x1F);
emit(0x44);
emit(0x00);
emit(0x00);
return;
case 7:
- emit(0x0f);
- emit(0x1f);
+ emit(0x0F);
+ emit(0x1F);
emit(0x80);
emit(0x00);
emit(0x00);
@@ -1989,8 +1996,8 @@ void Assembler::Nop(int n) {
emit(0x66);
n--;
case 8:
- emit(0x0f);
- emit(0x1f);
+ emit(0x0F);
+ emit(0x1F);
emit(0x84);
emit(0x00);
emit(0x00);
@@ -2898,10 +2905,10 @@ void Assembler::movq(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
if (dst.low_bits() == 4) {
// Avoid unnecessary SIB byte.
- emit(0xf3);
+ emit(0xF3);
emit_optional_rex_32(dst, src);
emit(0x0F);
- emit(0x7e);
+ emit(0x7E);
emit_sse_operand(dst, src);
} else {
emit(0x66);
@@ -3370,8 +3377,8 @@ void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x2e);
+ emit(0x0F);
+ emit(0x2E);
emit_sse_operand(dst, src);
}
@@ -3380,8 +3387,8 @@ void Assembler::ucomiss(XMMRegister dst, const Operand& src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x2e);
+ emit(0x0F);
+ emit(0x2E);
emit_sse_operand(dst, src);
}
@@ -3984,14 +3991,31 @@ void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
+void Assembler::haddps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x7C);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::haddps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x7C);
+ emit_sse_operand(dst, src);
+}
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x2e);
+ emit(0x0F);
+ emit(0x2E);
emit_sse_operand(dst, src);
}
@@ -4001,8 +4025,8 @@ void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x2e);
+ emit(0x0F);
+ emit(0x2E);
emit_sse_operand(dst, src);
}
@@ -4024,9 +4048,9 @@ void Assembler::roundss(XMMRegister dst, XMMRegister src, RoundingMode mode) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x3a);
- emit(0x0a);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x0A);
emit_sse_operand(dst, src);
// Mask precision exception.
emit(static_cast<byte>(mode) | 0x8);
@@ -4039,9 +4063,9 @@ void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x3a);
- emit(0x0b);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x0B);
emit_sse_operand(dst, src);
// Mask precision exception.
emit(static_cast<byte>(mode) | 0x8);
@@ -4052,7 +4076,7 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
- emit(0x0f);
+ emit(0x0F);
emit(0x50);
emit_sse_operand(dst, src);
}
@@ -4061,7 +4085,7 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
void Assembler::movmskps(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
- emit(0x0f);
+ emit(0x0F);
emit(0x50);
emit_sse_operand(dst, src);
}
@@ -4141,7 +4165,7 @@ void Assembler::vmovd(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
XMMRegister isrc = XMMRegister::from_code(src.code());
emit_vex_prefix(dst, xmm0, isrc, kL128, k66, k0F, kW0);
- emit(0x6e);
+ emit(0x6E);
emit_sse_operand(dst, src);
}
@@ -4150,7 +4174,7 @@ void Assembler::vmovd(XMMRegister dst, const Operand& src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, xmm0, src, kL128, k66, k0F, kW0);
- emit(0x6e);
+ emit(0x6E);
emit_sse_operand(dst, src);
}
@@ -4160,7 +4184,7 @@ void Assembler::vmovd(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
XMMRegister idst = XMMRegister::from_code(dst.code());
emit_vex_prefix(src, xmm0, idst, kL128, k66, k0F, kW0);
- emit(0x7e);
+ emit(0x7E);
emit_sse_operand(src, dst);
}
@@ -4170,7 +4194,7 @@ void Assembler::vmovq(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
XMMRegister isrc = XMMRegister::from_code(src.code());
emit_vex_prefix(dst, xmm0, isrc, kL128, k66, k0F, kW1);
- emit(0x6e);
+ emit(0x6E);
emit_sse_operand(dst, src);
}
@@ -4179,7 +4203,7 @@ void Assembler::vmovq(XMMRegister dst, const Operand& src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, xmm0, src, kL128, k66, k0F, kW1);
- emit(0x6e);
+ emit(0x6E);
emit_sse_operand(dst, src);
}
@@ -4189,7 +4213,7 @@ void Assembler::vmovq(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
XMMRegister idst = XMMRegister::from_code(dst.code());
emit_vex_prefix(src, xmm0, idst, kL128, k66, k0F, kW1);
- emit(0x7e);
+ emit(0x7E);
emit_sse_operand(src, dst);
}
@@ -4258,7 +4282,7 @@ void Assembler::vucomiss(XMMRegister dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, xmm0, src, kLIG, kNone, k0F, kWIG);
- emit(0x2e);
+ emit(0x2E);
emit_sse_operand(dst, src);
}
@@ -4267,7 +4291,7 @@ void Assembler::vucomiss(XMMRegister dst, const Operand& src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, xmm0, src, kLIG, kNone, k0F, kWIG);
- emit(0x2e);
+ emit(0x2E);
emit_sse_operand(dst, src);
}
@@ -4547,6 +4571,11 @@ void Assembler::rorxl(Register dst, const Operand& src, byte imm8) {
emit(imm8);
}
+void Assembler::pause() {
+ emit(0xF3);
+ emit(0x90);
+}
+
void Assembler::minps(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index e5711101bd..1c838b964b 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -182,6 +182,7 @@ constexpr Register arg_reg_4 = rcx;
V(xmm13) \
V(xmm14)
+constexpr bool kPadArguments = false;
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
@@ -477,10 +478,6 @@ class Assembler : public AssemblerBase {
static inline void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- static inline Address target_address_at(Address pc, Code* code);
- static inline void set_target_address_at(
- Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -1212,6 +1209,9 @@ class Assembler : public AssemblerBase {
void sqrtsd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, const Operand& src);
+ void haddps(XMMRegister dst, XMMRegister src);
+ void haddps(XMMRegister dst, const Operand& src);
+
void ucomisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, const Operand& src);
void cmpltsd(XMMRegister dst, XMMRegister src);
@@ -1905,6 +1905,9 @@ class Assembler : public AssemblerBase {
void rorxl(Register dst, Register src, byte imm8);
void rorxl(Register dst, const Operand& src, byte imm8);
+ void lfence();
+ void pause();
+
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 60d04fcbe6..27061c1e2b 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -523,12 +523,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// external reference instead of inlining the call target address directly
// in the code, because the builtin stubs may not have been generated yet
// at the time this code is generated.
- if (type() == StackFrame::CONSTRUCT_ENTRY) {
- __ Call(BUILTIN_CODE(isolate(), JSConstructEntryTrampoline),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(BUILTIN_CODE(isolate(), JSEntryTrampoline), RelocInfo::CODE_TARGET);
- }
+ __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@@ -651,7 +646,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -696,7 +691,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
Handle<Map> allocation_site_map =
masm->isolate()->factory()->allocation_site_map();
__ Cmp(FieldOperand(rbx, 0), allocation_site_map);
- __ Assert(equal, kExpectedAllocationSite);
+ __ Assert(equal, AbortReason::kExpectedAllocationSite);
}
// Save the resulting elements kind in type info. We can't just store r3
@@ -721,7 +716,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@@ -796,9 +791,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Will both indicate a nullptr and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
- __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
+ __ Check(not_smi, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(rcx, MAP_TYPE, rcx);
- __ Check(equal, kUnexpectedInitialMapForArrayFunction);
+ __ Check(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
// We should either have undefined in rbx or a valid AllocationSite
__ AssertUndefinedOrAllocationSite(rbx);
@@ -895,9 +890,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Will both indicate a nullptr and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
- __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
+ __ Check(not_smi, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(rcx, MAP_TYPE, rcx);
- __ Check(equal, kUnexpectedInitialMapForArrayFunction);
+ __ Check(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
// Figure out the right elements kind
@@ -914,8 +909,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ cmpl(rcx, Immediate(PACKED_ELEMENTS));
__ j(equal, &done);
__ cmpl(rcx, Immediate(HOLEY_ELEMENTS));
- __ Assert(equal,
- kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ Assert(
+ equal,
+ AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
@@ -1076,7 +1072,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ CompareRoot(return_value, Heap::kNullValueRootIndex);
__ j(equal, &ok, Label::kNear);
- __ Abort(kAPICallReturnedInvalidObject);
+ __ Abort(AbortReason::kAPICallReturnedInvalidObject);
__ bind(&ok);
#endif
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 8c22e07b12..25a74b98fc 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -15,8 +15,7 @@ namespace internal {
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t allocated = 0;
- byte* buffer =
- AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
+ byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@@ -32,8 +31,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, allocated);
- CHECK(base::OS::SetPermissions(buffer, allocated,
- base::OS::MemoryPermission::kReadExecute));
+ CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index adc1b7874e..8c4d6a20e8 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -12,12 +12,14 @@
namespace v8 {
namespace internal {
-
-const int Deoptimizer::table_entry_size_ = 10;
+const int Deoptimizer::table_entry_size_ = 5;
#define __ masm()->
void Deoptimizer::TableEntryGenerator::Generate() {
+ Label deopt_table_entry;
+ __ bind(&deopt_table_entry);
+
GeneratePrologue();
// Save all general purpose registers before messing with them.
@@ -63,7 +65,22 @@ void Deoptimizer::TableEntryGenerator::Generate() {
Register arg5 = r11;
// Get the bailout id from the stack.
- __ movp(arg_reg_3, Operand(rsp, kSavedRegistersAreaSize));
+ __ movp(rax, Operand(rsp, kSavedRegistersAreaSize));
+
+ // address of deoptimization table
+ __ leap(rdx, Operand(&deopt_table_entry));
+
+ // rax = deopt_entry - deopt_table_entry - 5
+ __ subp(rax, rdx);
+ __ subl(rax, Immediate(5));
+
+ // rax /= 5
+ __ movl(rbx, Immediate(0xcccccccd));
+ __ imulq(rax, rbx);
+ __ shrq(rax, Immediate(0x22));
+
+ // bailout id
+ __ movl(arg_reg_3, rax);
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
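// Editorial note (not part of the patch): with table_entry_size_ now 5, each
// deopt table entry is a single 5-byte "call &done", so the return address it
// pushes equals deopt_table_entry + 5 * (id + 1). After the two subtractions
// rax holds 5 * id, and the multiply/shift pair is unsigned division by 5 via
// a reciprocal: 0xCCCCCCCD == ceil(2^34 / 5), so (x * 0xCCCCCCCD) >> 34 == x / 5
// for any 32-bit x. Worked check: x = 35 gives 35 * 0xCCCCCCCD = 0x1C00000007,
// and 0x1C00000007 >> 34 == 7.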
@@ -231,8 +248,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
- __ pushq_imm32(i);
- __ jmp(&done);
+ __ call(&done);
DCHECK(masm()->pc_offset() - start == table_entry_size_);
}
__ bind(&done);
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 23f502ab47..247f5e889e 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -327,7 +327,7 @@ class DisassemblerX64 {
byte rex_;
byte operand_size_; // 0x66 or (if no group 3 prefix is present) 0x0.
byte group_1_prefix_; // 0xF2, 0xF3, or (if no group 1 prefix is present) 0.
- byte vex_byte0_; // 0xc4 or 0xc5
+ byte vex_byte0_; // 0xC4 or 0xC5
byte vex_byte1_;
byte vex_byte2_; // only for 3 bytes vex prefix
// Byte size operand override.
@@ -405,7 +405,7 @@ class DisassemblerX64 {
int vex_vreg() {
DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
byte checked = vex_byte0_ == VEX3_PREFIX ? vex_byte2_ : vex_byte1_;
- return ~(checked >> 3) & 0xf;
+ return ~(checked >> 3) & 0xF;
}
OperandSize operand_size() {
@@ -484,7 +484,7 @@ class DisassemblerX64 {
void UnimplementedInstruction() {
if (abort_on_unimplemented_) {
- CHECK(false);
+ FATAL("'Unimplemented Instruction'");
} else {
AppendToBuffer("'Unimplemented Instruction'");
}
@@ -616,8 +616,6 @@ int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) {
break;
default:
UNREACHABLE();
- value = 0; // Initialize variables on all paths to satisfy the compiler.
- count = 0;
}
AppendToBuffer("%" PRIx64, value);
return count;
@@ -890,62 +888,62 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xa9:
+ case 0xA9:
AppendToBuffer("vfmadd213s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xb9:
+ case 0xB9:
AppendToBuffer("vfmadd231s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x9b:
+ case 0x9B:
AppendToBuffer("vfmsub132s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xab:
+ case 0xAB:
AppendToBuffer("vfmsub213s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xbb:
+ case 0xBB:
AppendToBuffer("vfmsub231s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x9d:
+ case 0x9D:
AppendToBuffer("vfnmadd132s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xad:
+ case 0xAD:
AppendToBuffer("vfnmadd213s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xbd:
+ case 0xBD:
AppendToBuffer("vfnmadd231s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x9f:
+ case 0x9F:
AppendToBuffer("vfnmsub132s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xaf:
+ case 0xAF:
AppendToBuffer("vfnmsub213s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xbf:
+ case 0xBF:
AppendToBuffer("vfnmsub231s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xf7:
+ case 0xF7:
AppendToBuffer("shlx%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightOperand(current);
@@ -970,13 +968,13 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
- case 0x0a:
+ case 0x0A:
AppendToBuffer("vroundss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
- case 0x0b:
+ case 0x0B:
AppendToBuffer("vroundsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
@@ -1031,12 +1029,12 @@ int DisassemblerX64::AVXInstruction(byte* data) {
}
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
- case 0x2a:
+ case 0x2A:
AppendToBuffer("%s %s,%s,", vex_w() ? "vcvtqsi2ss" : "vcvtlsi2ss",
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0x2c:
+ case 0x2C:
AppendToBuffer("vcvttss2si%s %s,", vex_w() ? "q" : "",
NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
@@ -1051,27 +1049,27 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5a:
+ case 0x5A:
AppendToBuffer("vcvtss2sd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5c:
+ case 0x5C:
AppendToBuffer("vsubss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5d:
+ case 0x5D:
AppendToBuffer("vminss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5e:
+ case 0x5E:
AppendToBuffer("vdivss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5f:
+ case 0x5F:
AppendToBuffer("vmaxss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
@@ -1098,17 +1096,17 @@ int DisassemblerX64::AVXInstruction(byte* data) {
}
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
- case 0x2a:
+ case 0x2A:
AppendToBuffer("%s %s,%s,", vex_w() ? "vcvtqsi2sd" : "vcvtlsi2sd",
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0x2c:
+ case 0x2C:
AppendToBuffer("vcvttsd2si%s %s,", vex_w() ? "q" : "",
NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
break;
- case 0x2d:
+ case 0x2D:
AppendToBuffer("vcvtsd2si%s %s,", vex_w() ? "q" : "",
NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
@@ -1128,32 +1126,32 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5a:
+ case 0x5A:
AppendToBuffer("vcvtsd2ss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5c:
+ case 0x5C:
AppendToBuffer("vsubsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5d:
+ case 0x5D:
AppendToBuffer("vminsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5e:
+ case 0x5E:
AppendToBuffer("vdivsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x5f:
+ case 0x5F:
AppendToBuffer("vmaxsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0xf0:
+ case 0xF0:
AppendToBuffer("vlddqu %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
@@ -1165,24 +1163,24 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
const char* mnem = "?";
switch (opcode) {
- case 0xf2:
+ case 0xF2:
AppendToBuffer("andn%c %s,%s,", operand_size_code(),
NameOfCPURegister(regop), NameOfCPURegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0xf5:
+ case 0xF5:
AppendToBuffer("bzhi%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfCPURegister(vvvv));
break;
- case 0xf7:
+ case 0xF7:
AppendToBuffer("bextr%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfCPURegister(vvvv));
break;
- case 0xf3:
+ case 0xF3:
switch (regop) {
case 1:
mnem = "blsr";
@@ -1208,17 +1206,17 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
- case 0xf5:
+ case 0xF5:
AppendToBuffer("pdep%c %s,%s,", operand_size_code(),
NameOfCPURegister(regop), NameOfCPURegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0xf6:
+ case 0xF6:
AppendToBuffer("mulx%c %s,%s,", operand_size_code(),
NameOfCPURegister(regop), NameOfCPURegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0xf7:
+ case 0xF7:
AppendToBuffer("shrx%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightOperand(current);
@@ -1231,12 +1229,12 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
- case 0xf5:
+ case 0xF5:
AppendToBuffer("pext%c %s,%s,", operand_size_code(),
NameOfCPURegister(regop), NameOfCPURegister(vvvv));
current += PrintRightOperand(current);
break;
- case 0xf7:
+ case 0xF7:
AppendToBuffer("sarx%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightOperand(current);
@@ -1249,16 +1247,16 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
- case 0xf0:
+ case 0xF0:
AppendToBuffer("rorx%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightOperand(current);
switch (operand_size()) {
case OPERAND_DOUBLEWORD_SIZE:
- AppendToBuffer(",%d", *current & 0x1f);
+ AppendToBuffer(",%d", *current & 0x1F);
break;
case OPERAND_QUADWORD_SIZE:
- AppendToBuffer(",%d", *current & 0x3f);
+ AppendToBuffer(",%d", *current & 0x3F);
break;
default:
UnimplementedInstruction();
@@ -1290,7 +1288,7 @@ int DisassemblerX64::AVXInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
- case 0x2e:
+ case 0x2E:
AppendToBuffer("vucomiss %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
@@ -1343,7 +1341,7 @@ int DisassemblerX64::AVXInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
- case 0x2e:
+ case 0x2E:
AppendToBuffer("vucomisd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
@@ -1366,7 +1364,7 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x6e:
+ case 0x6E:
AppendToBuffer("vmov%c %s,", vex_w() ? 'q' : 'd',
NameOfXMMRegister(regop));
current += PrintRightOperand(current);
@@ -1394,7 +1392,7 @@ int DisassemblerX64::AVXInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",%u", *current++);
break;
- case 0x7e:
+ case 0x7E:
AppendToBuffer("vmov%c ", vex_w() ? 'q' : 'd');
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
@@ -1409,13 +1407,13 @@ int DisassemblerX64::AVXInstruction(byte* data) {
current += 1;
break;
}
- case 0xc4:
+ case 0xC4:
AppendToBuffer("vpinsrw %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
- case 0xc5:
+ case 0xC5:
AppendToBuffer("vpextrw %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
AppendToBuffer(",0x%x", *current++);
@@ -1659,13 +1657,13 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightOperand(current);
AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
current += 1;
- } else if (third_byte == 0x0a) {
+ } else if (third_byte == 0x0A) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("roundss %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
AppendToBuffer(",0x%x", (*current) & 3);
current += 1;
- } else if (third_byte == 0x0b) {
+ } else if (third_byte == 0x0B) {
get_modrm(*current, &mod, &regop, &rm);
// roundsd xmm, xmm/m64, imm8
AppendToBuffer("roundsd %s,", NameOfXMMRegister(regop));
@@ -1716,7 +1714,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
}
} else {
get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x1f) {
+ if (opcode == 0x1F) {
current++;
if (rm == 4) { // SIB byte present.
current++;
@@ -1774,17 +1772,17 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0x71) {
current += 1;
AppendToBuffer("ps%sw %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
- *current & 0x7f);
+ *current & 0x7F);
current += 1;
} else if (opcode == 0x72) {
current += 1;
AppendToBuffer("ps%sd %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
- *current & 0x7f);
+ *current & 0x7F);
current += 1;
} else if (opcode == 0x73) {
current += 1;
AppendToBuffer("ps%sq %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
- *current & 0x7f);
+ *current & 0x7F);
current += 1;
} else if (opcode == 0xB1) {
current += PrintOperands("cmpxchg", OPER_REG_OP_ORDER, current);
@@ -1973,6 +1971,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("lddqu %s,", NameOfXMMRegister(regop));
current += PrintRightOperand(current);
+ } else if (opcode == 0x7C) {
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("haddps %s,", NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
} else {
UnimplementedInstruction();
}
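For reference, haddps (F2 0F 7C, decoded above) horizontally adds adjacent single-precision pairs. A scalar model of its lane behaviour, as a sketch rather than anything taken from the patch:

    // Scalar model of haddps dst,src over four float lanes (illustrative only).
    void HaddPs(float dst[4], const float src[4]) {
      const float d0 = dst[0] + dst[1];
      const float d1 = dst[2] + dst[3];
      dst[0] = d0;
      dst[1] = d1;
      dst[2] = src[0] + src[1];
      dst[3] = src[2] + src[3];
    }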
@@ -2011,11 +2014,22 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(", %d", (*current) & 7);
current += 1;
+ } else if (opcode == 0x6F) {
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
} else if (opcode == 0x7E) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("movq %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x7F) {
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("movdqu ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if ((opcode & 0xF8) == 0x58 || opcode == 0x51) {
// XMM arithmetic. Mnemonic was retrieved at the start of this function.
int mod, regop, rm;
@@ -2096,7 +2110,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (opcode == 0x2e) {
+ } else if (opcode == 0x2E) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("ucomiss %s,", NameOfXMMRegister(regop));
@@ -2194,6 +2208,9 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
byte_size_operand_ = true;
}
current += PrintOperands(mnemonic, OPER_REG_OP_ORDER, current);
+ } else if (opcode == 0xAE && (*(data + 2) & 0xF8) == 0xE8) {
+ AppendToBuffer("lfence");
+ current = data + 3;
} else {
UnimplementedInstruction();
}
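The new branch recognizes lfence, which encodes as 0F AE /5: the ModRM byte must have a reg field of 5, i.e. lie in 0xE8..0xEF for the register form, hence the 0xF8 mask. The same test as a standalone sketch (helper name illustrative):

    // True when the three bytes at |insn| encode lfence (0F AE /5, i.e. a
    // ModRM byte in the range 0xE8..0xEF).
    bool IsLfence(const uint8_t* insn) {
      return insn[0] == 0x0F && insn[1] == 0xAE && (insn[2] & 0xF8) == 0xE8;
    }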
@@ -2373,10 +2390,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
}
case SHORT_IMMEDIATE_INSTR: {
- byte* addr =
- reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
- AppendToBuffer("%s rax,%s", idesc.mnem, NameOfAddress(addr));
- data += 5;
+ int32_t imm;
+ if (operand_size() == OPERAND_WORD_SIZE) {
+ imm = *reinterpret_cast<int16_t*>(data + 1);
+ data += 3;
+ } else {
+ imm = *reinterpret_cast<int32_t*>(data + 1);
+ data += 5;
+ }
+ AppendToBuffer("%s rax,0x%x", idesc.mnem, imm);
break;
}
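The reworked SHORT_IMMEDIATE_INSTR case prints the immediate itself rather than treating it as an address, and it honours the 0x66 operand-size prefix: with the prefix the instruction carries a 16-bit immediate (three bytes total after the opcode), otherwise a 32-bit one (five bytes total). A compact model of the width selection, with names that are illustrative only:

    // Immediate width and total instruction length for the "<op> rax, imm"
    // family handled by SHORT_IMMEDIATE_INSTR (sketch; not part of the patch).
    struct ShortImmediate { int32_t value; int length; };
    ShortImmediate DecodeShortImmediate(const uint8_t* data, bool has_66_prefix) {
      if (has_66_prefix) {
        // Operand-size override: a 16-bit immediate follows the opcode byte.
        return {*reinterpret_cast<const int16_t*>(data + 1), 3};
      }
      // Default: a 32-bit immediate follows the opcode byte.
      return {*reinterpret_cast<const int32_t*>(data + 1), 5};
    }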
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index 59c9532cb3..22bad696d2 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -44,8 +44,6 @@ const Register LoadDescriptor::SlotRegister() { return rax; }
const Register LoadWithVectorDescriptor::VectorRegister() { return rbx; }
-const Register LoadICProtoArrayDescriptor::HandlerRegister() { return rdi; }
-
const Register StoreDescriptor::ReceiverRegister() { return rdx; }
const Register StoreDescriptor::NameRegister() { return rcx; }
const Register StoreDescriptor::ValueRegister() { return rax; }
@@ -205,6 +203,11 @@ void TransitionElementsKindDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void AbortJSDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rdx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index e305aaa1a5..5019be3727 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -186,7 +186,8 @@ void MacroAssembler::PushAddress(ExternalReference source) {
int64_t address = reinterpret_cast<int64_t>(source.address());
if (is_int32(address) && !serializer_enabled()) {
if (emit_debug_code()) {
- Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
+ Move(kScratchRegister, reinterpret_cast<Address>(kZapValue),
+ Assembler::RelocInfoNone());
}
Push(Immediate(static_cast<int32_t>(address)));
return;
@@ -255,8 +256,9 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- Move(value, kZapValue, Assembler::RelocInfoNone());
- Move(dst, kZapValue, Assembler::RelocInfoNone());
+ Move(value, reinterpret_cast<Address>(kZapValue),
+ Assembler::RelocInfoNone());
+ Move(dst, reinterpret_cast<Address>(kZapValue), Assembler::RelocInfoNone());
}
}
@@ -298,15 +300,35 @@ void TurboAssembler::CallRecordWriteStub(
Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kFPMode));
- pushq(object);
- pushq(address);
-
- popq(slot_parameter);
- popq(object_parameter);
+ // Prepare argument registers for calling RecordWrite
+ // slot_parameter <= address
+ // object_parameter <= object
+ if (slot_parameter != object) {
+ // Normal case
+ Move(slot_parameter, address);
+ Move(object_parameter, object);
+ } else if (object_parameter != address) {
+ // Only slot_parameter and object are the same register
+ // object_parameter <= object
+ // slot_parameter <= address
+ Move(object_parameter, object);
+ Move(slot_parameter, address);
+ } else {
+    // slot_parameter aliases object and object_parameter aliases address:
+    // the two moves form a cycle, so exchange the registers instead.
+ xchgq(slot_parameter, object_parameter);
+ }
LoadAddress(isolate_parameter, ExternalReference::isolate_address(isolate()));
- Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
- Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
+
+ Smi* smi_rsa = Smi::FromEnum(remembered_set_action);
+ Smi* smi_fm = Smi::FromEnum(fp_mode);
+ Move(remembered_set_parameter, smi_rsa);
+ if (smi_rsa != smi_fm) {
+ Move(fp_mode_parameter, smi_fm);
+ } else {
+ movq(fp_mode_parameter, remembered_set_parameter);
+ }
Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreRegisters(registers);
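The push/pop pair is replaced by an explicit parallel move of the two argument registers, covering the case where a destination aliases the other pair's source. Reduced to its essentials (hypothetical helper mirroring the logic above):

    // Perform dst0 <- src0 and dst1 <- src1 when dst0 may alias src1 and
    // dst1 may alias src0. In CallRecordWriteStub, dst0/src0 are
    // slot_parameter/address and dst1/src1 are object_parameter/object.
    void MoveTwo(TurboAssembler* tasm, Register dst0, Register src0,
                 Register dst1, Register src1) {
      if (dst0 != src1) {
        tasm->Move(dst0, src0);   // writing dst0 cannot clobber src1
        tasm->Move(dst1, src1);
      } else if (dst1 != src0) {
        tasm->Move(dst1, src1);   // write the other destination first
        tasm->Move(dst0, src0);
      } else {
        tasm->xchgq(dst0, dst1);  // the two pairs form a cycle: swap
      }
    }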
@@ -366,20 +388,22 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- Move(address, kZapValue, Assembler::RelocInfoNone());
- Move(value, kZapValue, Assembler::RelocInfoNone());
+ Move(address, reinterpret_cast<Address>(kZapValue),
+ Assembler::RelocInfoNone());
+ Move(value, reinterpret_cast<Address>(kZapValue),
+ Assembler::RelocInfoNone());
}
}
-void TurboAssembler::Assert(Condition cc, BailoutReason reason) {
+void TurboAssembler::Assert(Condition cc, AbortReason reason) {
if (emit_debug_code()) Check(cc, reason);
}
-void TurboAssembler::AssertUnreachable(BailoutReason reason) {
+void TurboAssembler::AssertUnreachable(AbortReason reason) {
if (emit_debug_code()) Abort(reason);
}
-void TurboAssembler::Check(Condition cc, BailoutReason reason) {
+void TurboAssembler::Check(Condition cc, AbortReason reason) {
Label L;
j(cc, &L, Label::kNear);
Abort(reason);
@@ -401,9 +425,9 @@ void TurboAssembler::CheckStackAlignment() {
}
}
-void TurboAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(AbortReason reason) {
#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
+ const char* msg = GetAbortReason(reason);
if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -1622,9 +1646,7 @@ void TurboAssembler::Call(ExternalReference ext) {
#endif
LoadAddress(kScratchRegister, ext);
call(kScratchRegister);
-#ifdef DEBUG
DCHECK_EQ(end_position, pc_offset());
-#endif
}
void TurboAssembler::Call(const Operand& op) {
@@ -1642,9 +1664,7 @@ void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) {
#endif
Move(kScratchRegister, destination, rmode);
call(kScratchRegister);
-#ifdef DEBUG
DCHECK_EQ(pc_offset(), end_position);
-#endif
}
void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
@@ -1653,9 +1673,52 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
#endif
DCHECK(RelocInfo::IsCodeTarget(rmode));
call(code_object, rmode);
-#ifdef DEBUG
DCHECK_EQ(end_position, pc_offset());
+}
+
+void TurboAssembler::RetpolineCall(Register reg) {
+ Label setup_return, setup_target, inner_indirect_branch, capture_spec;
+
+ jmp(&setup_return); // Jump past the entire retpoline below.
+
+ bind(&inner_indirect_branch);
+ call(&setup_target);
+
+ bind(&capture_spec);
+ pause();
+ jmp(&capture_spec);
+
+ bind(&setup_target);
+ movq(Operand(rsp, 0), reg);
+ ret(0);
+
+ bind(&setup_return);
+ call(&inner_indirect_branch); // Callee will return after this instruction.
+}
+
+void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
+#ifdef DEBUG
+// TODO(titzer): CallSize() is wrong for RetpolineCalls
+// int end_position = pc_offset() + CallSize(destination);
#endif
+ Move(kScratchRegister, destination, rmode);
+ RetpolineCall(kScratchRegister);
+ // TODO(titzer): CallSize() is wrong for RetpolineCalls
+ // DCHECK_EQ(pc_offset(), end_position);
+}
+
+void TurboAssembler::RetpolineJump(Register reg) {
+ Label setup_target, capture_spec;
+
+ call(&setup_target);
+
+ bind(&capture_spec);
+ pause();
+ jmp(&capture_spec);
+
+ bind(&setup_target);
+ movq(Operand(rsp, 0), reg);
+ ret(0);
}
void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
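The new Retpoline{Call,Jump} helpers follow the standard retpoline pattern: the real branch target is written over the return address and reached via ret, while any speculative execution of the indirect transfer is trapped in the pause/jmp loop instead of being steered by a potentially poisoned indirect branch predictor. A hedged sketch of how a call site might choose between the two forms; the flag name is an assumption and not part of this patch:

    // Route indirect calls through the retpoline thunk when Spectre-v2 style
    // hardening is requested; otherwise emit a plain indirect call.
    // FLAG_untrusted_code_mitigations is an illustrative flag name.
    if (FLAG_untrusted_code_mitigations) {
      tasm->RetpolineCall(target);  // |target| holds the callee address
    } else {
      tasm->call(target);
    }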
@@ -1919,9 +1982,11 @@ MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ Push(Immediate(0)); // Padding.
+
// Link the current handler as the next handler.
ExternalReference handler_address(IsolateAddressId::kHandlerAddress,
isolate());
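PushStackHandler now reserves two slots: a padding word is pushed below the next-handler link, presumably to keep the handler size an even number of pointers and compatible with 16-byte stack alignment. Any matching pop then has to drop both slots, along these lines (the real PopStackHandler may differ):

    // Sketch of the corresponding unlink.
    Pop(ExternalOperand(handler_address));  // restore the next-handler link
    addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));  // drop padding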
@@ -1999,7 +2064,7 @@ void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
Condition is_smi = CheckSmi(object);
- Check(NegateCondition(is_smi), kOperandIsASmi);
+ Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi);
}
}
@@ -2007,7 +2072,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
Condition is_smi = CheckSmi(object);
- Check(is_smi, kOperandIsNotASmi);
+ Check(is_smi, AbortReason::kOperandIsNotASmi);
}
}
@@ -2015,27 +2080,27 @@ void MacroAssembler::AssertSmi(Register object) {
void MacroAssembler::AssertSmi(const Operand& object) {
if (emit_debug_code()) {
Condition is_smi = CheckSmi(object);
- Check(is_smi, kOperandIsNotASmi);
+ Check(is_smi, AbortReason::kOperandIsNotASmi);
}
}
void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotAFixedArray);
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotAFixedArray);
Push(object);
CmpObjectType(object, FIXED_ARRAY_TYPE, object);
Pop(object);
- Check(equal, kOperandIsNotAFixedArray);
+ Check(equal, AbortReason::kOperandIsNotAFixedArray);
}
}
void TurboAssembler::AssertZeroExtended(Register int32_register) {
if (emit_debug_code()) {
DCHECK_NE(int32_register, kScratchRegister);
- movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
+ movq(kScratchRegister, int64_t{0x0000000100000000});
cmpq(kScratchRegister, int32_register);
- Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
+ Check(above_equal, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
}
}
@@ -2043,11 +2108,11 @@ void TurboAssembler::AssertZeroExtended(Register int32_register) {
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotAFunction);
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
Push(object);
CmpObjectType(object, JS_FUNCTION_TYPE, object);
Pop(object);
- Check(equal, kOperandIsNotAFunction);
+ Check(equal, AbortReason::kOperandIsNotAFunction);
}
}
@@ -2055,18 +2120,18 @@ void MacroAssembler::AssertFunction(Register object) {
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotABoundFunction);
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
Push(object);
CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
Pop(object);
- Check(equal, kOperandIsNotABoundFunction);
+ Check(equal, AbortReason::kOperandIsNotABoundFunction);
}
}
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
testb(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotAGeneratorObject);
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
// Load map
Register map = object;
@@ -2084,7 +2149,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
bind(&do_check);
// Restore generator object to register and perform assertion
Pop(object);
- Check(equal, kOperandIsNotAGeneratorObject);
+ Check(equal, AbortReason::kOperandIsNotAGeneratorObject);
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
@@ -2094,7 +2159,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
Cmp(object, isolate()->factory()->undefined_value());
j(equal, &done_checking);
Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
- Assert(equal, kExpectedUndefinedOrCell);
+ Assert(equal, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
}
@@ -2161,7 +2226,7 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
if (FLAG_debug_code) {
cmpp(rsp, new_sp_reg);
- Check(below, kStackAccessBelowStackPointer);
+ Check(below, AbortReason::kStackAccessBelowStackPointer);
}
// Copy return address from caller's frame to current frame's return address
@@ -2387,7 +2452,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
isolate()->factory()->undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
cmpp(Operand(rsp, 0), kScratchRegister);
- Check(not_equal, kCodeObjectNotProperlyPatched);
+ Check(not_equal, AbortReason::kCodeObjectNotProperlyPatched);
}
}
@@ -2395,7 +2460,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(type)));
- Check(equal, kStackFrameTypesMustMatch);
+ Check(equal, AbortReason::kStackFrameTypesMustMatch);
}
movp(rsp, rbp);
popq(rbp);
@@ -2548,7 +2613,7 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
Operand context_operand = ExternalOperand(context_address);
movp(rsi, context_operand);
#ifdef DEBUG
- movp(context_operand, Immediate(0));
+ movp(context_operand, Immediate(Context::kInvalidContext));
#endif
// Clear the top frame.
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 73650f36e5..4ceab2cf9c 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -345,6 +345,11 @@ class TurboAssembler : public Assembler {
void Call(ExternalReference ext);
void Call(Label* target) { call(target); }
+ void RetpolineCall(Register reg);
+ void RetpolineCall(Address destination, RelocInfo::Mode rmode);
+
+ void RetpolineJump(Register reg);
+
void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
call(target, rmode);
}
@@ -383,21 +388,21 @@ class TurboAssembler : public Assembler {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, BailoutReason reason);
+ void Assert(Condition cc, AbortReason reason);
// Like Assert(), but without condition.
// Use --debug_code to enable.
- void AssertUnreachable(BailoutReason reason);
+ void AssertUnreachable(AbortReason reason);
// Abort execution if a 64 bit register containing a 32 bit payload does not
// have zeros in the top 32 bits, enabled via --debug-code.
void AssertZeroExtended(Register reg);
// Like Assert(), but always enabled.
- void Check(Condition cc, BailoutReason reason);
+ void Check(Condition cc, AbortReason reason);
// Print a message to stdout and abort execution.
- void Abort(BailoutReason msg);
+ void Abort(AbortReason msg);
// Check that the stack is aligned.
void CheckStackAlignment();
diff --git a/deps/v8/src/x64/simulator-x64.cc b/deps/v8/src/x64/simulator-x64.cc
index 701842eab3..4797ae91bb 100644
--- a/deps/v8/src/x64/simulator-x64.cc
+++ b/deps/v8/src/x64/simulator-x64.cc
@@ -3,3 +3,5 @@
// found in the LICENSE file.
#include "src/x64/simulator-x64.h"
+
+// Since there is no simulator for the x64 architecture this file is empty.
diff --git a/deps/v8/src/x64/simulator-x64.h b/deps/v8/src/x64/simulator-x64.h
index f1351c88cf..ce9f3592dc 100644
--- a/deps/v8/src/x64/simulator-x64.h
+++ b/deps/v8/src/x64/simulator-x64.h
@@ -5,46 +5,6 @@
#ifndef V8_X64_SIMULATOR_X64_H_
#define V8_X64_SIMULATOR_X64_H_
-#include "src/allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// Since there is no simulator for the x64 architecture the only thing we can
-// do is to call the entry directly.
-// TODO(X64): Don't pass p0, since it isn't used?
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, int, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address should
-// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on x64 uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
- uintptr_t c_limit) {
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
- uintptr_t try_catch_address) {
- USE(isolate);
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch(Isolate* isolate) { USE(isolate); }
-};
-
-} // namespace internal
-} // namespace v8
+// Since there is no simulator for the x64 architecture this file is empty.
#endif // V8_X64_SIMULATOR_X64_H_
diff --git a/deps/v8/src/zone/accounting-allocator.cc b/deps/v8/src/zone/accounting-allocator.cc
index ee841fb4af..8ef141b4c1 100644
--- a/deps/v8/src/zone/accounting-allocator.cc
+++ b/deps/v8/src/zone/accounting-allocator.cc
@@ -10,6 +10,8 @@
#include <malloc.h> // NOLINT
#endif
+#include "src/allocation.h"
+
namespace v8 {
namespace internal {
@@ -82,11 +84,7 @@ Segment* AccountingAllocator::GetSegment(size_t bytes) {
}
Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
- void* memory = malloc(bytes);
- if (memory == nullptr) {
- V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
- memory = malloc(bytes);
- }
+ void* memory = AllocWithRetry(bytes);
if (memory != nullptr) {
base::AtomicWord current =
base::Relaxed_AtomicIncrement(&current_memory_usage_, bytes);
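The open-coded malloc / OnCriticalMemoryPressure / retry sequence moves into AllocWithRetry in src/allocation.cc. A minimal sketch mirroring the code removed here; the real helper may differ in retry policy:

    // Try the allocation, give the embedder one chance to release memory on
    // failure, then retry once; returns nullptr if the retry also fails.
    void* AllocWithRetry(size_t bytes) {
      void* memory = malloc(bytes);
      if (memory == nullptr) {
        V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
        memory = malloc(bytes);
      }
      return memory;
    }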
diff --git a/deps/v8/src/zone/zone-containers.h b/deps/v8/src/zone/zone-containers.h
index 78d25cc644..5e9fd0440a 100644
--- a/deps/v8/src/zone/zone-containers.h
+++ b/deps/v8/src/zone/zone-containers.h
@@ -41,6 +41,11 @@ class ZoneVector : public std::vector<T, ZoneAllocator<T>> {
ZoneVector(size_t size, T def, Zone* zone)
: std::vector<T, ZoneAllocator<T>>(size, def, ZoneAllocator<T>(zone)) {}
+ // Constructs a new vector and fills it with the contents of the given
+ // initializer list.
+ ZoneVector(std::initializer_list<T> list, Zone* zone)
+ : std::vector<T, ZoneAllocator<T>>(list, ZoneAllocator<T>(zone)) {}
+
// Constructs a new vector and fills it with the contents of the range
// [first, last).
template <class InputIt>
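Example use of the new initializer-list constructor (the variable names are made up):

    // Builds a three-element vector whose storage lives in |zone|.
    ZoneVector<int> lane_sizes({8, 16, 32}, zone);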
diff --git a/deps/v8/src/zone/zone.cc b/deps/v8/src/zone/zone.cc
index de8146de05..470f4c4177 100644
--- a/deps/v8/src/zone/zone.cc
+++ b/deps/v8/src/zone/zone.cc
@@ -42,7 +42,8 @@ const size_t kASanRedzoneBytes = 0;
} // namespace
-Zone::Zone(AccountingAllocator* allocator, const char* name)
+Zone::Zone(AccountingAllocator* allocator, const char* name,
+ SegmentSize segment_size)
: allocation_size_(0),
segment_bytes_allocated_(0),
position_(0),
@@ -50,7 +51,8 @@ Zone::Zone(AccountingAllocator* allocator, const char* name)
allocator_(allocator),
segment_head_(nullptr),
name_(name),
- sealed_(false) {
+ sealed_(false),
+ segment_size_(segment_size) {
allocator_->ZoneCreation(this);
}
@@ -148,6 +150,9 @@ Address Zone::NewExpand(size_t size) {
V8::FatalProcessOutOfMemory("Zone");
return nullptr;
}
+ if (segment_size_ == SegmentSize::kLarge) {
+ new_size = kMaximumSegmentSize;
+ }
if (new_size < kMinimumSegmentSize) {
new_size = kMinimumSegmentSize;
} else if (new_size > kMaximumSegmentSize) {
diff --git a/deps/v8/src/zone/zone.h b/deps/v8/src/zone/zone.h
index c8c1fe3515..e15e3d116e 100644
--- a/deps/v8/src/zone/zone.h
+++ b/deps/v8/src/zone/zone.h
@@ -34,9 +34,13 @@ namespace internal {
//
// Note: The implementation is inherently not thread safe. Do not use
// from multi-threaded code.
+
+enum class SegmentSize { kLarge, kDefault };
+
class V8_EXPORT_PRIVATE Zone final {
public:
- Zone(AccountingAllocator* allocator, const char* name);
+ Zone(AccountingAllocator* allocator, const char* name,
+ SegmentSize segment_size = SegmentSize::kDefault);
~Zone();
// Allocate 'size' bytes of memory in the Zone; expands the Zone by
@@ -109,6 +113,7 @@ class V8_EXPORT_PRIVATE Zone final {
Segment* segment_head_;
const char* name_;
bool sealed_;
+ SegmentSize segment_size_;
};
// ZoneObject is an abstraction that helps define classes of objects
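With the new parameter, a zone that is known to grow large can request maximum-size segments from the first expansion on, cutting down on the number of NewExpand calls; the default growth policy is unchanged. Illustrative usage (the zone names are made up):

    // Grow in kMaximumSegmentSize chunks from the first expansion on.
    Zone big_zone(allocator, "graph-zone", SegmentSize::kLarge);

    // Existing call sites keep the default growth policy.
    Zone scratch_zone(allocator, "scratch");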